From dadd7ec92684197f5c935ee62214a7fec5254abd Mon Sep 17 00:00:00 2001 From: Shantanu Kotambkar <52007797+skotambkar@users.noreply.github.com> Date: Wed, 20 Nov 2019 16:18:14 -0800 Subject: [PATCH] Release v0.17.0 (2019-11-20) (#436) * Release v0.17.0 (2019-11-20) === Services --- * Synced the V2 SDK with latest AWS service API definitions. SDK Enhancements --- * SDK minimum version requirement has been updated to Go 1.12 ([#432](https://github.com/aws/aws-sdk-go-v2/pull/432)) --- CHANGELOG.md | 11 + CHANGELOG_PENDING.md | 1 - aws/endpoints/defaults.go | 60 +- aws/version.go | 2 +- .../cmd/op_crawler/create_service.go | 6 + .../AWSMigrationHub/2017-05-31/api-2.json | 57 +- .../AWSMigrationHub/2017-05-31/docs-2.json | 69 +- .../2017-05-31/paginators-1.json | 26 +- models/apis/autoscaling/2011-01-01/api-2.json | 21 +- .../apis/autoscaling/2011-01-01/docs-2.json | 117 +- models/apis/ce/2017-10-25/api-2.json | 4 +- models/apis/ce/2017-10-25/docs-2.json | 6 +- models/apis/chime/2018-05-01/api-2.json | 986 ++- models/apis/chime/2018-05-01/docs-2.json | 455 +- .../apis/chime/2018-05-01/paginators-1.json | 20 + .../apis/cloudformation/2010-05-15/api-2.json | 651 +- .../cloudformation/2010-05-15/docs-2.json | 422 +- .../2010-05-15/paginators-1.json | 15 + .../cloudformation/2010-05-15/waiters-2.json | 20 + models/apis/cloudsearch/2013-01-01/api-2.json | 887 +-- .../apis/cloudsearch/2013-01-01/docs-2.json | 56 +- .../cloudsearch/2013-01-01/examples-1.json | 5 + .../cloudsearch/2013-01-01/paginators-1.json | 3 +- models/apis/cloudsearch/2013-01-01/smoke.json | 18 + models/apis/cloudtrail/2013-11-01/api-2.json | 128 +- models/apis/cloudtrail/2013-11-01/docs-2.json | 85 +- models/apis/codebuild/2016-10-06/api-2.json | 7 +- models/apis/codebuild/2016-10-06/docs-2.json | 10 +- models/apis/codecommit/2015-04-13/api-2.json | 1141 +++- models/apis/codecommit/2015-04-13/docs-2.json | 1109 ++- .../codecommit/2015-04-13/paginators-1.json | 15 + 
models/apis/cognito-idp/2016-04-18/api-2.json | 36 +- .../apis/cognito-idp/2016-04-18/docs-2.json | 37 +- models/apis/config/2014-11-12/api-2.json | 787 ++- models/apis/config/2014-11-12/docs-2.json | 530 +- models/apis/connect/2017-08-08/api-2.json | 126 +- models/apis/connect/2017-08-08/docs-2.json | 56 +- .../apis/dataexchange/2017-07-25/api-2.json | 2263 +++++++ .../apis/dataexchange/2017-07-25/docs-2.json | 581 ++ .../dataexchange/2017-07-25/paginators-1.json | 28 + models/apis/datasync/2018-11-09/api-2.json | 29 +- models/apis/datasync/2018-11-09/docs-2.json | 23 +- models/apis/discovery/2015-11-01/api-2.json | 85 +- models/apis/discovery/2015-11-01/docs-2.json | 10 +- models/apis/dlm/2018-01-12/api-2.json | 211 +- models/apis/dlm/2018-01-12/docs-2.json | 100 +- models/apis/ec2/2016-11-15/api-2.json | 519 +- models/apis/ec2/2016-11-15/docs-2.json | 304 +- models/apis/ec2/2016-11-15/paginators-1.json | 12 + models/apis/ec2/2016-11-15/waiters-2.json | 18 + models/apis/ecs/2014-11-13/api-2.json | 84 +- models/apis/ecs/2014-11-13/docs-2.json | 64 +- models/apis/eks/2017-11-01/api-2.json | 469 +- models/apis/eks/2017-11-01/docs-2.json | 280 +- models/apis/eks/2017-11-01/examples-1.json | 21 + models/apis/eks/2017-11-01/paginators-1.json | 6 + models/apis/eks/2017-11-01/waiters-2.json | 37 + .../2015-12-01/api-2.json | 54 +- .../2015-12-01/docs-2.json | 68 +- .../elasticmapreduce/2009-03-31/api-2.json | 18 +- .../elasticmapreduce/2009-03-31/docs-2.json | 34 +- models/apis/firehose/2015-08-04/api-2.json | 74 +- models/apis/firehose/2015-08-04/docs-2.json | 56 +- models/apis/fsx/2018-03-01/api-2.json | 13 + models/apis/fsx/2018-03-01/docs-2.json | 25 +- models/apis/guardduty/2017-11-28/api-2.json | 302 + models/apis/guardduty/2017-11-28/docs-2.json | 232 +- .../guardduty/2017-11-28/paginators-1.json | 5 + models/apis/iam/2010-05-08/api-2.json | 13 +- models/apis/iam/2010-05-08/docs-2.json | 81 +- models/apis/iam/2010-05-08/examples-1.json | 5 + 
models/apis/iot/2015-05-28/api-2.json | 492 +- models/apis/iot/2015-05-28/docs-2.json | 356 +- models/apis/lambda/2015-03-31/api-2.json | 3 + models/apis/logs/2014-03-28/docs-2.json | 10 +- .../apis/mediaconvert/2017-08-29/api-2.json | 219 +- .../apis/mediaconvert/2017-08-29/docs-2.json | 141 +- models/apis/mediastore/2017-09-01/api-2.json | 4 +- models/apis/mediastore/2017-09-01/docs-2.json | 6 +- .../meteringmarketplace/2016-01-14/api-2.json | 3 +- .../2016-01-14/docs-2.json | 4 +- .../migrationhub-config/2019-06-30/api-2.json | 207 + .../2019-06-30/docs-2.json | 150 + .../2019-06-30/examples-1.json | 5 + .../2019-06-30/paginators-1.json | 9 + models/apis/personalize/2018-05-22/api-2.json | 151 + .../apis/personalize/2018-05-22/docs-2.json | 110 +- .../personalize/2018-05-22/paginators-1.json | 6 + models/apis/pinpoint/2016-12-01/api-2.json | 363 + models/apis/pinpoint/2016-12-01/docs-2.json | 112 +- models/apis/quicksight/2018-04-01/api-2.json | 5511 +++++++++++++-- models/apis/quicksight/2018-04-01/docs-2.json | 2642 +++++++- .../quicksight/2018-04-01/paginators-1.json | 40 + models/apis/rds/2014-10-31/docs-2.json | 14 +- .../runtime.sagemaker/2017-05-13/api-2.json | 15 +- .../runtime.sagemaker/2017-05-13/docs-2.json | 20 +- models/apis/s3/2006-03-01/api-2.json | 70 +- models/apis/s3/2006-03-01/docs-2.json | 1027 +-- models/apis/s3/2006-03-01/examples-1.json | 234 +- models/apis/sagemaker/2017-07-24/api-2.json | 8 + models/apis/sagemaker/2017-07-24/docs-2.json | 38 +- models/apis/sesv2/2019-09-27/api-2.json | 2093 ++++++ models/apis/sesv2/2019-09-27/docs-2.json | 1386 ++++ models/apis/sesv2/2019-09-27/examples-1.json | 5 + .../apis/sesv2/2019-09-27/paginators-1.json | 34 + models/apis/ssm/2014-11-06/api-2.json | 174 +- models/apis/ssm/2014-11-06/docs-2.json | 167 +- .../apis/storagegateway/2013-06-30/api-2.json | 97 +- .../storagegateway/2013-06-30/docs-2.json | 76 +- models/apis/transcribe/2017-10-26/api-2.json | 9 +- 
models/apis/transcribe/2017-10-26/docs-2.json | 9 +- models/apis/workspaces/2015-04-08/api-2.json | 238 +- models/apis/workspaces/2015-04-08/docs-2.json | 161 +- models/endpoints/endpoints.json | 57 +- .../applicationdiscoveryservice/api_doc.go | 4 + .../applicationdiscoveryservice/api_errors.go | 6 + .../api_op_DescribeExportConfigurations.go | 2 +- .../api_op_CreateAutoScalingGroup.go | 5 + .../api_op_CreateLaunchConfiguration.go | 14 +- service/autoscaling/api_op_EnterStandby.go | 9 + service/autoscaling/api_op_ExitStandby.go | 2 + .../api_op_UpdateAutoScalingGroup.go | 13 +- service/autoscaling/api_types.go | 124 +- service/chime/api_doc.go | 15 +- service/chime/api_enums.go | 35 + service/chime/api_op_BatchCreateAttendee.go | 197 + .../chime/api_op_BatchCreateRoomMembership.go | 188 + service/chime/api_op_CreateAttendee.go | 164 + service/chime/api_op_CreateMeeting.go | 192 + service/chime/api_op_CreateRoom.go | 176 + service/chime/api_op_CreateRoomMembership.go | 184 + service/chime/api_op_DeleteAttendee.go | 156 + service/chime/api_op_DeleteMeeting.go | 140 + service/chime/api_op_DeleteRoom.go | 152 + service/chime/api_op_DeleteRoomMembership.go | 167 + service/chime/api_op_GetAttendee.go | 161 + service/chime/api_op_GetMeeting.go | 146 + service/chime/api_op_GetRoom.go | 158 + ...GetVoiceConnectorStreamingConfiguration.go | 4 +- service/chime/api_op_InviteUsers.go | 10 +- service/chime/api_op_ListAttendees.go | 234 + service/chime/api_op_ListBots.go | 3 +- service/chime/api_op_ListMeetings.go | 219 + service/chime/api_op_ListRoomMemberships.go | 248 + service/chime/api_op_ListRooms.go | 243 + ...PutVoiceConnectorStreamingConfiguration.go | 4 +- .../chime/api_op_UpdatePhoneNumberSettings.go | 2 +- service/chime/api_op_UpdateRoom.go | 167 + service/chime/api_op_UpdateRoomMembership.go | 184 + service/chime/api_types.go | 571 +- service/chime/chimeiface/interface.go | 38 + service/cloudformation/api_enums.go | 179 +- service/cloudformation/api_errors.go | 26 + 
service/cloudformation/api_integ_test.go | 5 +- .../cloudformation/api_op_DeregisterType.go | 142 + service/cloudformation/api_op_DescribeType.go | 227 + .../api_op_DescribeTypeRegistration.go | 148 + .../api_op_DetectStackSetDrift.go | 171 + .../api_op_ListTypeRegistrations.go | 211 + .../cloudformation/api_op_ListTypeVersions.go | 219 + service/cloudformation/api_op_ListTypes.go | 227 + .../api_op_RecordHandlerProgress.go | 150 + service/cloudformation/api_op_RegisterType.go | 208 + .../api_op_SetTypeDefaultVersion.go | 132 + service/cloudformation/api_types.go | 279 + service/cloudformation/api_waiters.go | 44 + .../cloudformationiface/interface.go | 22 + service/cloudsearch/api_enums.go | 18 + service/cloudsearch/api_errors.go | 6 + service/cloudsearch/api_integ_test.go | 63 + .../api_op_DescribeDomainEndpointOptions.go | 131 + .../api_op_UpdateDomainEndpointOptions.go | 138 + service/cloudsearch/api_types.go | 36 + .../cloudsearch/cloudsearchiface/interface.go | 4 + service/cloudtrail/api_enums.go | 32 + service/cloudtrail/api_errors.go | 22 + service/cloudtrail/api_op_DescribeTrails.go | 6 +- .../cloudtrail/api_op_GetInsightSelectors.go | 147 + service/cloudtrail/api_op_ListTrails.go | 10 + service/cloudtrail/api_op_LookupEvents.go | 25 +- .../cloudtrail/api_op_PutInsightSelectors.go | 137 + service/cloudtrail/api_types.go | 31 +- .../cloudtrail/cloudtrailiface/interface.go | 4 + .../cloudwatchlogs/api_op_CreateLogGroup.go | 5 +- service/cloudwatchlogs/api_op_GetLogEvents.go | 2 + .../cloudwatchlogs/api_op_PutDestination.go | 11 +- service/cloudwatchlogs/api_op_StartQuery.go | 1 + service/cloudwatchlogs/api_types.go | 6 +- service/codebuild/api_doc.go | 36 +- service/codebuild/api_enums.go | 13 +- service/codebuild/api_op_BatchGetBuilds.go | 2 +- service/codebuild/api_op_BatchGetProjects.go | 2 +- service/codebuild/api_types.go | 34 +- service/codecommit/api_doc.go | 90 +- service/codecommit/api_enums.go | 39 + service/codecommit/api_errors.go | 299 +- 
...ciateApprovalRuleTemplateWithRepository.go | 140 + ...ateApprovalRuleTemplateWithRepositories.go | 142 + .../api_op_BatchDescribeMergeConflicts.go | 15 +- ...ateApprovalRuleTemplateFromRepositories.go | 144 + service/codecommit/api_op_BatchGetCommits.go | 8 +- .../codecommit/api_op_BatchGetRepositories.go | 9 +- .../api_op_CreateApprovalRuleTemplate.go | 171 + service/codecommit/api_op_CreateBranch.go | 2 +- service/codecommit/api_op_CreateCommit.go | 24 +- .../codecommit/api_op_CreatePullRequest.go | 14 +- .../api_op_CreatePullRequestApprovalRule.go | 169 + service/codecommit/api_op_CreateRepository.go | 14 +- .../api_op_CreateUnreferencedMergeCommit.go | 23 +- .../api_op_DeleteApprovalRuleTemplate.go | 125 + service/codecommit/api_op_DeleteFile.go | 39 +- .../api_op_DeletePullRequestApprovalRule.go | 141 + service/codecommit/api_op_DeleteRepository.go | 4 +- .../api_op_DescribeMergeConflicts.go | 17 +- .../api_op_DescribePullRequestEvents.go | 12 +- ...ciateApprovalRuleTemplateFromRepository.go | 137 + ...api_op_EvaluatePullRequestApprovalRules.go | 134 + .../api_op_GetApprovalRuleTemplate.go | 123 + service/codecommit/api_op_GetBlob.go | 2 +- .../api_op_GetCommentsForComparedCommit.go | 8 +- .../api_op_GetCommentsForPullRequest.go | 6 +- service/codecommit/api_op_GetCommit.go | 2 +- service/codecommit/api_op_GetDifferences.go | 24 +- service/codecommit/api_op_GetFile.go | 18 +- service/codecommit/api_op_GetFolder.go | 24 +- service/codecommit/api_op_GetMergeCommit.go | 15 +- .../codecommit/api_op_GetMergeConflicts.go | 17 +- service/codecommit/api_op_GetMergeOptions.go | 17 +- .../api_op_GetPullRequestApprovalStates.go | 129 + .../api_op_GetPullRequestOverrideState.go | 136 + service/codecommit/api_op_GetRepository.go | 6 +- .../api_op_ListApprovalRuleTemplates.go | 166 + ...iatedApprovalRuleTemplatesForRepository.go | 186 + service/codecommit/api_op_ListPullRequests.go | 8 +- ...ListRepositoriesForApprovalRuleTemplate.go | 187 + 
.../codecommit/api_op_ListTagsForResource.go | 2 +- .../api_op_MergeBranchesByFastForward.go | 6 +- .../api_op_MergeBranchesBySquash.go | 29 +- .../api_op_MergeBranchesByThreeWay.go | 31 +- .../api_op_MergePullRequestByFastForward.go | 3 +- .../api_op_MergePullRequestBySquash.go | 25 +- .../api_op_MergePullRequestByThreeWay.go | 25 +- ...api_op_OverridePullRequestApprovalRules.go | 142 + .../api_op_PostCommentForComparedCommit.go | 22 +- .../api_op_PostCommentForPullRequest.go | 14 +- service/codecommit/api_op_PostCommentReply.go | 4 +- service/codecommit/api_op_PutFile.go | 24 +- .../api_op_PutRepositoryTriggers.go | 5 +- .../api_op_TestRepositoryTriggers.go | 8 +- ...pi_op_UpdateApprovalRuleTemplateContent.go | 143 + ...p_UpdateApprovalRuleTemplateDescription.go | 132 + .../api_op_UpdateApprovalRuleTemplateName.go | 135 + service/codecommit/api_op_UpdateComment.go | 3 +- ...op_UpdatePullRequestApprovalRuleContent.go | 172 + .../api_op_UpdatePullRequestApprovalState.go | 137 + .../api_op_UpdatePullRequestDescription.go | 2 +- .../api_op_UpdatePullRequestStatus.go | 2 +- .../api_op_UpdatePullRequestTitle.go | 2 +- .../api_op_UpdateRepositoryDescription.go | 6 +- .../codecommit/api_op_UpdateRepositoryName.go | 10 +- service/codecommit/api_types.go | 363 +- .../codecommit/codecommitiface/interface.go | 46 +- service/cognitoidentityprovider/api_enums.go | 41 +- .../api_op_AdminInitiateAuth.go | 5 + .../api_op_ConfirmForgotPassword.go | 14 +- .../api_op_CreateUserPoolClient.go | 61 +- .../api_op_InitiateAuth.go | 5 + .../api_op_UpdateGroup.go | 3 + .../api_op_UpdateResourceServer.go | 3 + .../api_op_UpdateUserAttributes.go | 14 +- .../api_op_UpdateUserPool.go | 8 +- .../api_op_UpdateUserPoolClient.go | 65 +- service/cognitoidentityprovider/api_types.go | 85 +- service/configservice/api_enums.go | 97 +- service/configservice/api_errors.go | 80 +- .../api_op_DeleteConformancePack.go | 126 + ...pi_op_DeleteOrganizationConformancePack.go | 128 + 
...pi_op_DescribeConformancePackCompliance.go | 146 + .../api_op_DescribeConformancePackStatus.go | 113 + .../api_op_DescribeConformancePacks.go | 115 + .../api_op_DescribeOrganizationConfigRules.go | 2 +- ...ribeOrganizationConformancePackStatuses.go | 128 + ...op_DescribeOrganizationConformancePacks.go | 119 + ..._op_GetConformancePackComplianceDetails.go | 147 + ..._op_GetConformancePackComplianceSummary.go | 122 + ...ganizationConformancePackDetailedStatus.go | 141 + .../api_op_PutConformancePack.go | 182 + .../api_op_PutOrganizationConformancePack.go | 184 + service/configservice/api_types.go | 506 +- .../configserviceiface/interface.go | 24 + service/connect/api_op_CreateUser.go | 18 + service/connect/api_op_DescribeUser.go | 4 +- service/connect/api_op_ListTagsForResource.go | 149 + service/connect/api_op_TagResource.go | 164 + service/connect/api_op_UntagResource.go | 161 + service/connect/api_types.go | 15 + service/connect/connectiface/interface.go | 6 + .../costexplorer/api_op_GetCostAndUsage.go | 4 +- service/costexplorer/api_types.go | 10 + service/dataexchange/api_client.go | 79 + service/dataexchange/api_doc.go | 28 + service/dataexchange/api_enums.go | 186 + service/dataexchange/api_errors.go | 49 + service/dataexchange/api_op_CancelJob.go | 136 + service/dataexchange/api_op_CreateDataSet.go | 294 + service/dataexchange/api_op_CreateJob.go | 236 + service/dataexchange/api_op_CreateRevision.go | 250 + service/dataexchange/api_op_DeleteAsset.go | 161 + service/dataexchange/api_op_DeleteDataSet.go | 135 + service/dataexchange/api_op_DeleteRevision.go | 148 + service/dataexchange/api_op_GetAsset.go | 253 + service/dataexchange/api_op_GetDataSet.go | 242 + service/dataexchange/api_op_GetJob.go | 210 + service/dataexchange/api_op_GetRevision.go | 232 + .../api_op_ListDataSetRevisions.go | 229 + service/dataexchange/api_op_ListDataSets.go | 225 + service/dataexchange/api_op_ListJobs.go | 231 + .../dataexchange/api_op_ListRevisionAssets.go | 242 + 
.../api_op_ListTagsForResource.go | 146 + service/dataexchange/api_op_StartJob.go | 132 + service/dataexchange/api_op_TagResource.go | 154 + service/dataexchange/api_op_UntagResource.go | 154 + service/dataexchange/api_op_UpdateAsset.go | 271 + service/dataexchange/api_op_UpdateDataSet.go | 247 + service/dataexchange/api_op_UpdateRevision.go | 239 + service/dataexchange/api_types.go | 1545 +++++ .../dataexchangeiface/interface.go | 109 + service/datasync/api_enums.go | 6 +- service/datasync/api_op_CreateLocationEfs.go | 2 + service/datasync/api_op_CreateLocationSmb.go | 6 +- service/datasync/api_op_CreateTask.go | 10 + service/datasync/api_op_DescribeTask.go | 4 + service/datasync/api_op_UpdateTask.go | 12 + service/datasync/api_types.go | 39 +- service/dlm/api_op_CreateLifecyclePolicy.go | 20 +- service/dlm/api_op_ListTagsForResource.go | 149 + service/dlm/api_op_TagResource.go | 158 + service/dlm/api_op_UntagResource.go | 158 + service/dlm/api_op_UpdateLifecyclePolicy.go | 5 +- service/dlm/api_types.go | 129 + service/dlm/dlmiface/interface.go | 6 + service/ec2/api_enums.go | 71 + service/ec2/api_op_AttachVolume.go | 1 - service/ec2/api_op_CopySnapshot.go | 8 +- service/ec2/api_op_CreateCustomerGateway.go | 14 +- service/ec2/api_op_CreateSnapshot.go | 1 - service/ec2/api_op_CreateSnapshots.go | 7 +- service/ec2/api_op_CreateVolume.go | 4 +- service/ec2/api_op_DeleteSnapshot.go | 1 - service/ec2/api_op_DeleteVolume.go | 1 - .../ec2/api_op_DescribeExportImageTasks.go | 53 + .../api_op_DescribeFastSnapshotRestores.go | 181 + service/ec2/api_op_DescribeInstances.go | 14 +- .../ec2/api_op_DescribeSnapshotAttribute.go | 2 - service/ec2/api_op_DescribeVolumeAttribute.go | 2 - service/ec2/api_op_DescribeVpnConnections.go | 3 + service/ec2/api_op_DetachVolume.go | 1 - .../ec2/api_op_DisableFastSnapshotRestores.go | 139 + .../ec2/api_op_EnableFastSnapshotRestores.go | 144 + service/ec2/api_op_EnableVolumeIO.go | 1 - .../api_op_ModifyInstanceMetadataOptions.go | 162 + 
service/ec2/api_op_ModifySnapshotAttribute.go | 1 - service/ec2/api_op_ModifyVolumeAttribute.go | 1 - service/ec2/api_op_ResetSnapshotAttribute.go | 1 - service/ec2/api_op_RunInstances.go | 4 + service/ec2/api_types.go | 344 + service/ec2/api_waiters.go | 44 + service/ec2/ec2iface/interface.go | 10 + service/ecs/api_op_ListAccountSettings.go | 8 +- service/ecs/api_op_ListAttributes.go | 8 +- service/ecs/api_op_ListClusters.go | 8 +- service/ecs/api_op_ListContainerInstances.go | 8 +- service/ecs/api_op_ListServices.go | 8 +- .../ecs/api_op_ListTaskDefinitionFamilies.go | 8 +- service/ecs/api_op_ListTaskDefinitions.go | 8 +- service/ecs/api_op_ListTasks.go | 8 +- service/ecs/api_op_RunTask.go | 3 + service/ecs/api_op_StartTask.go | 3 + service/ecs/api_types.go | 44 +- service/eks/api_enums.go | 93 +- service/eks/api_errors.go | 3 +- service/eks/api_examples_test.go | 37 + service/eks/api_op_CreateNodegroup.go | 352 + service/eks/api_op_DeleteCluster.go | 3 + service/eks/api_op_DeleteNodegroup.go | 158 + service/eks/api_op_DescribeNodegroup.go | 158 + service/eks/api_op_DescribeUpdate.go | 14 +- service/eks/api_op_ListNodegroups.go | 246 + service/eks/api_op_ListTagsForResource.go | 3 +- service/eks/api_op_ListUpdates.go | 13 +- service/eks/api_op_TagResource.go | 7 +- service/eks/api_op_UntagResource.go | 3 +- service/eks/api_op_UpdateClusterVersion.go | 4 + service/eks/api_op_UpdateNodegroupConfig.go | 202 + service/eks/api_op_UpdateNodegroupVersion.go | 229 + service/eks/api_types.go | 596 +- service/eks/api_waiters.go | 88 + service/eks/eksiface/interface.go | 16 + service/elasticloadbalancingv2/api_errors.go | 8 + .../api_examples_test.go | 12 + .../api_op_CreateListener.go | 8 +- .../api_op_CreateRule.go | 8 +- .../api_op_ModifyListener.go | 18 +- .../api_op_ModifyRule.go | 15 +- service/elasticloadbalancingv2/api_types.go | 69 +- service/emr/api_op_AddInstanceFleet.go | 3 + service/emr/api_op_AddInstanceGroups.go | 3 + service/emr/api_op_ListSteps.go | 7 +- 
service/emr/api_op_PutAutoScalingPolicy.go | 3 + service/emr/api_op_RunJobFlow.go | 14 +- service/emr/api_op_SetVisibleToAllUsers.go | 32 +- service/emr/api_types.go | 66 +- service/firehose/api_enums.go | 58 +- service/firehose/api_errors.go | 9 + .../firehose/api_op_CreateDeliveryStream.go | 25 +- .../firehose/api_op_DeleteDeliveryStream.go | 30 +- .../firehose/api_op_DescribeDeliveryStream.go | 14 +- .../api_op_StartDeliveryStreamEncryption.go | 42 +- .../api_op_StopDeliveryStreamEncryption.go | 9 +- service/firehose/api_types.go | 107 +- service/fsx/api_enums.go | 17 + service/fsx/api_op_CreateFileSystem.go | 19 +- service/fsx/api_types.go | 85 +- service/guardduty/api_enums.go | 35 + service/guardduty/api_op_CreateIPSet.go | 7 +- .../api_op_CreatePublishingDestination.go | 195 + .../guardduty/api_op_CreateSampleFindings.go | 2 +- .../guardduty/api_op_CreateThreatIntelSet.go | 3 +- service/guardduty/api_op_DeleteIPSet.go | 7 +- .../api_op_DeletePublishingDestination.go | 153 + .../api_op_DescribePublishingDestination.go | 211 + service/guardduty/api_op_GetIPSet.go | 8 +- service/guardduty/api_op_ListFindings.go | 103 +- service/guardduty/api_op_ListIPSets.go | 4 +- .../api_op_ListPublishingDestinations.go | 244 + .../guardduty/api_op_ListThreatIntelSets.go | 11 +- .../api_op_StartMonitoringMembers.go | 13 +- .../guardduty/api_op_StopMonitoringMembers.go | 6 +- service/guardduty/api_op_TagResource.go | 3 +- service/guardduty/api_op_UnarchiveFindings.go | 7 +- service/guardduty/api_op_UntagResource.go | 4 +- service/guardduty/api_op_UpdateDetector.go | 10 +- .../api_op_UpdateFindingsFeedback.go | 7 +- .../api_op_UpdatePublishingDestination.go | 162 + service/guardduty/api_types.go | 164 +- service/guardduty/guarddutyiface/interface.go | 10 + service/iam/api_types.go | 119 +- service/iot/api_enums.go | 37 + service/iot/api_op_AttachThingPrincipal.go | 3 +- .../iot/api_op_ConfirmTopicRuleDestination.go | 139 + service/iot/api_op_CreateStream.go | 4 - 
.../iot/api_op_CreateTopicRuleDestination.go | 147 + .../iot/api_op_DeleteTopicRuleDestination.go | 132 + service/iot/api_op_GetCardinality.go | 177 + service/iot/api_op_GetPercentiles.go | 207 + service/iot/api_op_GetTopicRuleDestination.go | 141 + .../iot/api_op_ListTopicRuleDestinations.go | 162 + .../iot/api_op_UpdateTopicRuleDestination.go | 172 + service/iot/api_types.go | 754 ++- service/iot/iotiface/interface.go | 16 + service/lambda/api_enums.go | 3 + service/marketplacemetering/api_doc.go | 9 +- .../api_op_RegisterUsage.go | 40 +- service/mediaconvert/api_enums.go | 152 +- service/mediaconvert/api_types.go | 619 +- service/mediastore/api_op_CreateContainer.go | 2 +- service/mediastore/api_op_TagResource.go | 2 +- service/mediastore/api_types.go | 10 +- service/migrationhub/api_doc.go | 4 + service/migrationhub/api_errors.go | 23 +- .../api_op_AssociateCreatedArtifact.go | 3 +- .../api_op_AssociateDiscoveredResource.go | 7 +- .../api_op_CreateProgressUpdateStream.go | 3 +- .../api_op_DeleteProgressUpdateStream.go | 5 +- .../api_op_DescribeApplicationState.go | 3 +- .../api_op_DescribeMigrationTask.go | 3 +- .../api_op_DisassociateCreatedArtifact.go | 2 +- .../api_op_DisassociateDiscoveredResource.go | 9 +- .../api_op_ImportMigrationTask.go | 5 +- .../api_op_ListCreatedArtifacts.go | 56 +- .../api_op_ListDiscoveredResources.go | 55 +- .../migrationhub/api_op_ListMigrationTasks.go | 53 + .../api_op_ListProgressUpdateStreams.go | 53 + .../api_op_NotifyApplicationState.go | 7 +- .../api_op_NotifyMigrationTaskState.go | 3 +- .../api_op_PutResourceAttributes.go | 14 +- service/migrationhub/api_types.go | 15 +- service/migrationhubconfig/api_client.go | 81 + service/migrationhubconfig/api_doc.go | 49 + service/migrationhubconfig/api_enums.go | 19 + service/migrationhubconfig/api_errors.go | 40 + .../api_op_CreateHomeRegionControl.go | 141 + .../api_op_DescribeHomeRegionControls.go | 201 + .../api_op_GetHomeRegion.go | 103 + service/migrationhubconfig/api_types.go 
| 79 + .../migrationhubconfigiface/interface.go | 71 + .../api_op_CreateBatchInferenceJob.go | 175 + .../api_op_DescribeBatchInferenceJob.go | 120 + .../api_op_ListBatchInferenceJobs.go | 181 + service/personalize/api_types.go | 203 +- .../personalize/personalizeiface/interface.go | 10 +- .../pinpoint/api_op_CreateVoiceTemplate.go | 160 + .../pinpoint/api_op_DeleteVoiceTemplate.go | 144 + service/pinpoint/api_op_GetVoiceTemplate.go | 145 + service/pinpoint/api_op_SendMessages.go | 3 +- .../pinpoint/api_op_UpdateVoiceTemplate.go | 160 + service/pinpoint/api_types.go | 504 +- service/pinpoint/pinpointiface/interface.go | 8 + service/quicksight/api_enums.go | 468 +- service/quicksight/api_errors.go | 15 +- service/quicksight/api_op_CancelIngestion.go | 201 + service/quicksight/api_op_CreateDashboard.go | 370 + service/quicksight/api_op_CreateDataSet.go | 424 ++ service/quicksight/api_op_CreateDataSource.go | 360 + .../api_op_CreateIAMPolicyAssignment.go | 310 + service/quicksight/api_op_CreateIngestion.go | 215 + service/quicksight/api_op_CreateTemplate.go | 338 + .../quicksight/api_op_CreateTemplateAlias.go | 220 + service/quicksight/api_op_DeleteDashboard.go | 207 + service/quicksight/api_op_DeleteDataSet.go | 193 + service/quicksight/api_op_DeleteDataSource.go | 192 + .../api_op_DeleteIAMPolicyAssignment.go | 197 + service/quicksight/api_op_DeleteTemplate.go | 220 + .../quicksight/api_op_DeleteTemplateAlias.go | 220 + service/quicksight/api_op_DeleteUser.go | 2 - .../api_op_DeleteUserByPrincipalId.go | 2 - .../quicksight/api_op_DescribeDashboard.go | 210 + .../api_op_DescribeDashboardPermissions.go | 207 + service/quicksight/api_op_DescribeDataSet.go | 183 + .../api_op_DescribeDataSetPermissions.go | 210 + .../quicksight/api_op_DescribeDataSource.go | 176 + .../api_op_DescribeDataSourcePermissions.go | 200 + .../api_op_DescribeIAMPolicyAssignment.go | 197 + .../quicksight/api_op_DescribeIngestion.go | 192 + service/quicksight/api_op_DescribeTemplate.go | 211 + 
.../api_op_DescribeTemplateAlias.go | 200 + .../api_op_DescribeTemplatePermissions.go | 206 + service/quicksight/api_op_DescribeUser.go | 2 - .../quicksight/api_op_GetDashboardEmbedUrl.go | 27 +- .../api_op_ListDashboardVersions.go | 271 + service/quicksight/api_op_ListDashboards.go | 254 + service/quicksight/api_op_ListDataSets.go | 252 + service/quicksight/api_op_ListDataSources.go | 252 + .../api_op_ListIAMPolicyAssignments.go | 225 + .../api_op_ListIAMPolicyAssignmentsForUser.go | 234 + service/quicksight/api_op_ListIngestions.go | 263 + .../quicksight/api_op_ListTagsForResource.go | 168 + .../quicksight/api_op_ListTemplateAliases.go | 271 + .../quicksight/api_op_ListTemplateVersions.go | 271 + service/quicksight/api_op_ListTemplates.go | 253 + service/quicksight/api_op_ListUserGroups.go | 2 - service/quicksight/api_op_ListUsers.go | 2 - service/quicksight/api_op_RegisterUser.go | 18 +- service/quicksight/api_op_TagResource.go | 207 + service/quicksight/api_op_UntagResource.go | 177 + service/quicksight/api_op_UpdateDashboard.go | 307 + .../api_op_UpdateDashboardPermissions.go | 301 + .../api_op_UpdateDashboardPublishedVersion.go | 209 + service/quicksight/api_op_UpdateDataSet.go | 373 ++ .../api_op_UpdateDataSetPermissions.go | 251 + service/quicksight/api_op_UpdateDataSource.go | 280 + .../api_op_UpdateDataSourcePermissions.go | 251 + .../api_op_UpdateIAMPolicyAssignment.go | 303 + service/quicksight/api_op_UpdateTemplate.go | 263 + .../quicksight/api_op_UpdateTemplateAlias.go | 218 + .../api_op_UpdateTemplatePermissions.go | 279 + service/quicksight/api_op_UpdateUser.go | 6 +- service/quicksight/api_types.go | 5933 ++++++++++++++++- .../quicksight/quicksightiface/interface.go | 102 +- service/rds/api_op_CreateDBInstance.go | 7 + service/rds/api_op_CreateEventSubscription.go | 3 + .../api_op_ModifyCurrentDBClusterCapacity.go | 6 +- service/rds/api_op_ModifyDBSnapshot.go | 7 +- service/rds/api_types.go | 12 +- service/s3/api_enums.go | 79 +- 
service/s3/api_errors.go | 6 + service/s3/api_examples_test.go | 164 +- service/s3/api_op_AbortMultipartUpload.go | 26 +- service/s3/api_op_CompleteMultipartUpload.go | 88 +- service/s3/api_op_CopyObject.go | 197 +- service/s3/api_op_CreateBucket.go | 64 +- service/s3/api_op_CreateMultipartUpload.go | 172 +- service/s3/api_op_DeleteBucket.go | 8 + ...i_op_DeleteBucketAnalyticsConfiguration.go | 15 +- service/s3/api_op_DeleteBucketCors.go | 18 +- service/s3/api_op_DeleteBucketEncryption.go | 18 +- ...i_op_DeleteBucketInventoryConfiguration.go | 17 + service/s3/api_op_DeleteBucketLifecycle.go | 24 +- ...api_op_DeleteBucketMetricsConfiguration.go | 24 +- service/s3/api_op_DeleteBucketPolicy.go | 26 +- service/s3/api_op_DeleteBucketReplication.go | 23 +- service/s3/api_op_DeleteBucketTagging.go | 12 + service/s3/api_op_DeleteBucketWebsite.go | 23 +- service/s3/api_op_DeleteObject.go | 33 +- service/s3/api_op_DeleteObjectTagging.go | 20 +- service/s3/api_op_DeleteObjects.go | 56 +- service/s3/api_op_DeletePublicAccessBlock.go | 16 +- ...api_op_GetBucketAccelerateConfiguration.go | 27 +- service/s3/api_op_GetBucketAcl.go | 13 +- .../api_op_GetBucketAnalyticsConfiguration.go | 23 +- service/s3/api_op_GetBucketCors.go | 19 +- service/s3/api_op_GetBucketEncryption.go | 16 +- .../api_op_GetBucketInventoryConfiguration.go | 21 +- service/s3/api_op_GetBucketLifecycle.go | 31 +- .../api_op_GetBucketLifecycleConfiguration.go | 35 +- service/s3/api_op_GetBucketLocation.go | 16 +- service/s3/api_op_GetBucketLogging.go | 8 + .../api_op_GetBucketMetricsConfiguration.go | 21 +- service/s3/api_op_GetBucketNotification.go | 11 +- ...i_op_GetBucketNotificationConfiguration.go | 18 +- service/s3/api_op_GetBucketPolicy.go | 23 +- service/s3/api_op_GetBucketPolicyStatus.go | 17 +- service/s3/api_op_GetBucketReplication.go | 24 + service/s3/api_op_GetBucketRequestPayment.go | 10 +- service/s3/api_op_GetBucketTagging.go | 19 + service/s3/api_op_GetBucketVersioning.go | 16 + 
service/s3/api_op_GetBucketWebsite.go | 21 +- service/s3/api_op_GetObject.go | 142 +- service/s3/api_op_GetObjectAcl.go | 21 +- service/s3/api_op_GetObjectLegalHold.go | 3 +- .../s3/api_op_GetObjectLockConfiguration.go | 11 +- service/s3/api_op_GetObjectRetention.go | 3 +- service/s3/api_op_GetObjectTagging.go | 28 +- service/s3/api_op_GetObjectTorrent.go | 20 +- service/s3/api_op_GetPublicAccessBlock.go | 25 +- service/s3/api_op_HeadBucket.go | 12 +- service/s3/api_op_HeadObject.go | 129 +- ...pi_op_ListBucketAnalyticsConfigurations.go | 31 +- ...pi_op_ListBucketInventoryConfigurations.go | 33 +- .../api_op_ListBucketMetricsConfigurations.go | 29 +- service/s3/api_op_ListBuckets.go | 2 + service/s3/api_op_ListMultipartUploads.go | 72 +- service/s3/api_op_ListObjectVersions.go | 69 +- service/s3/api_op_ListObjects.go | 48 +- service/s3/api_op_ListObjectsV2.go | 79 +- service/s3/api_op_ListParts.go | 67 +- ...api_op_PutBucketAccelerateConfiguration.go | 38 +- service/s3/api_op_PutBucketAcl.go | 77 +- .../api_op_PutBucketAnalyticsConfiguration.go | 45 +- service/s3/api_op_PutBucketCors.go | 48 +- service/s3/api_op_PutBucketEncryption.go | 34 +- .../api_op_PutBucketInventoryConfiguration.go | 50 +- service/s3/api_op_PutBucketLifecycle.go | 51 +- .../api_op_PutBucketLifecycleConfiguration.go | 71 +- service/s3/api_op_PutBucketLogging.go | 49 +- .../api_op_PutBucketMetricsConfiguration.go | 28 +- service/s3/api_op_PutBucketNotification.go | 4 + ...i_op_PutBucketNotificationConfiguration.go | 52 +- service/s3/api_op_PutBucketPolicy.go | 25 +- service/s3/api_op_PutBucketReplication.go | 61 +- service/s3/api_op_PutBucketRequestPayment.go | 14 +- service/s3/api_op_PutBucketTagging.go | 45 + service/s3/api_op_PutBucketVersioning.go | 38 +- service/s3/api_op_PutBucketWebsite.go | 66 +- service/s3/api_op_PutObject.go | 251 +- service/s3/api_op_PutObjectAcl.go | 74 +- service/s3/api_op_PutObjectLegalHold.go | 4 + .../s3/api_op_PutObjectLockConfiguration.go | 17 +- 
service/s3/api_op_PutObjectRetention.go | 6 +- service/s3/api_op_PutObjectTagging.go | 45 + service/s3/api_op_PutPublicAccessBlock.go | 24 +- service/s3/api_op_RestoreObject.go | 189 + service/s3/api_op_UploadPart.go | 79 +- service/s3/api_op_UploadPartCopy.go | 97 +- service/s3/api_types.go | 880 ++- service/s3/s3manager/upload_input.go | 67 +- service/sagemaker/api_enums.go | 17 + .../sagemaker/api_op_CreateEndpointConfig.go | 19 +- .../sagemaker/api_op_CreateTransformJob.go | 8 +- service/sagemaker/api_types.go | 86 +- service/sagemakerruntime/api_errors.go | 3 +- .../sagemakerruntime/api_op_InvokeEndpoint.go | 53 +- service/sesv2/api_client.go | 79 + service/sesv2/api_doc.go | 54 + service/sesv2/api_enums.go | 250 + service/sesv2/api_errors.go | 68 + .../sesv2/api_op_CreateConfigurationSet.go | 210 + ..._CreateConfigurationSetEventDestination.go | 180 + service/sesv2/api_op_CreateDedicatedIpPool.go | 165 + .../api_op_CreateDeliverabilityTestReport.go | 233 + service/sesv2/api_op_CreateEmailIdentity.go | 210 + .../sesv2/api_op_DeleteConfigurationSet.go | 143 + ..._DeleteConfigurationSetEventDestination.go | 159 + service/sesv2/api_op_DeleteDedicatedIpPool.go | 137 + service/sesv2/api_op_DeleteEmailIdentity.go | 141 + service/sesv2/api_op_GetAccount.go | 187 + service/sesv2/api_op_GetBlacklistReports.go | 169 + service/sesv2/api_op_GetConfigurationSet.go | 210 + ...op_GetConfigurationSetEventDestinations.go | 160 + service/sesv2/api_op_GetDedicatedIp.go | 149 + service/sesv2/api_op_GetDedicatedIps.go | 222 + ...pi_op_GetDeliverabilityDashboardOptions.go | 199 + .../api_op_GetDeliverabilityTestReport.go | 204 + .../api_op_GetDomainDeliverabilityCampaign.go | 155 + .../sesv2/api_op_GetDomainStatisticsReport.go | 206 + service/sesv2/api_op_GetEmailIdentity.go | 213 + service/sesv2/api_op_ListConfigurationSets.go | 222 + service/sesv2/api_op_ListDedicatedIpPools.go | 214 + .../api_op_ListDeliverabilityTestReports.go | 223 + 
...pi_op_ListDomainDeliverabilityCampaigns.go | 284 + service/sesv2/api_op_ListEmailIdentities.go | 223 + service/sesv2/api_op_ListTagsForResource.go | 158 + ...p_PutAccountDedicatedIpWarmupAttributes.go | 124 + .../api_op_PutAccountSendingAttributes.go | 125 + ...i_op_PutConfigurationSetDeliveryOptions.go | 162 + ...op_PutConfigurationSetReputationOptions.go | 151 + ...pi_op_PutConfigurationSetSendingOptions.go | 150 + ...i_op_PutConfigurationSetTrackingOptions.go | 149 + service/sesv2/api_op_PutDedicatedIpInPool.go | 161 + .../api_op_PutDedicatedIpWarmupAttributes.go | 152 + ...api_op_PutDeliverabilityDashboardOption.go | 169 + .../api_op_PutEmailIdentityDkimAttributes.go | 151 + ...i_op_PutEmailIdentityFeedbackAttributes.go | 168 + ...i_op_PutEmailIdentityMailFromAttributes.go | 172 + service/sesv2/api_op_SendEmail.go | 251 + service/sesv2/api_op_TagResource.go | 176 + service/sesv2/api_op_UntagResource.go | 161 + ..._UpdateConfigurationSetEventDestination.go | 180 + service/sesv2/api_types.go | 2262 +++++++ service/sesv2/sesv2iface/interface.go | 149 + service/ssm/api_enums.go | 3 + service/ssm/api_op_CreateOpsItem.go | 12 + service/ssm/api_op_CreateResourceDataSync.go | 55 +- service/ssm/api_op_DeleteResourceDataSync.go | 11 +- service/ssm/api_op_DescribeParameters.go | 2 +- service/ssm/api_op_GetOpsSummary.go | 27 +- service/ssm/api_op_GetParametersByPath.go | 8 +- service/ssm/api_op_ListCommandInvocations.go | 2 +- service/ssm/api_op_ListResourceDataSync.go | 9 + service/ssm/api_op_PutInventory.go | 2 +- service/ssm/api_op_UpdateOpsItem.go | 12 + service/ssm/api_types.go | 293 +- service/storagegateway/api_enums.go | 60 + .../api_op_DeleteBandwidthRateLimit.go | 3 +- .../api_op_DeleteChapCredentials.go | 3 +- .../api_op_DescribeAvailabilityMonitorTest.go | 134 + .../api_op_DescribeBandwidthRateLimit.go | 3 +- .../api_op_DescribeChapCredentials.go | 1 + .../api_op_DescribeGatewayInformation.go | 7 +- .../api_op_DescribeSMBSettings.go | 22 + 
service/storagegateway/api_op_DetachVolume.go | 3 +- service/storagegateway/api_op_JoinDomain.go | 29 +- .../api_op_ListTagsForResource.go | 2 +- service/storagegateway/api_op_RefreshCache.go | 11 + .../api_op_RemoveTagsFromResource.go | 4 +- .../api_op_StartAvailabilityMonitorTest.go | 129 + .../api_op_UpdateBandwidthRateLimit.go | 3 +- .../api_op_UpdateChapCredentials.go | 3 +- .../storagegatewayiface/interface.go | 4 + service/support/api_integ_test.go | 5 +- service/transcribe/api_types.go | 13 + service/workspaces/api_enums.go | 34 + service/workspaces/api_errors.go | 21 +- .../workspaces/api_op_AssociateIpGroups.go | 5 +- .../api_op_DeregisterWorkspaceDirectory.go | 122 + service/workspaces/api_op_DescribeAccount.go | 2 +- .../api_op_DescribeAccountModifications.go | 4 +- .../api_op_DescribeWorkspaceDirectories.go | 9 +- .../workspaces/api_op_DescribeWorkspaces.go | 5 +- .../workspaces/api_op_DisassociateIpGroups.go | 5 +- .../workspaces/api_op_ImportWorkspaceImage.go | 2 +- ...pi_op_ListAvailableManagementCidrRanges.go | 4 +- service/workspaces/api_op_ModifyAccount.go | 2 +- .../api_op_ModifySelfservicePermissions.go | 129 + .../api_op_ModifyWorkspaceAccessProperties.go | 128 + ...pi_op_ModifyWorkspaceCreationProperties.go | 132 + .../api_op_RegisterWorkspaceDirectory.go | 167 + service/workspaces/api_types.go | 157 +- .../workspaces/workspacesiface/interface.go | 10 + 769 files changed, 95936 insertions(+), 4726 deletions(-) create mode 100644 models/apis/cloudsearch/2013-01-01/examples-1.json create mode 100644 models/apis/cloudsearch/2013-01-01/smoke.json create mode 100644 models/apis/dataexchange/2017-07-25/api-2.json create mode 100644 models/apis/dataexchange/2017-07-25/docs-2.json create mode 100644 models/apis/dataexchange/2017-07-25/paginators-1.json create mode 100644 models/apis/migrationhub-config/2019-06-30/api-2.json create mode 100644 models/apis/migrationhub-config/2019-06-30/docs-2.json create mode 100644 
models/apis/migrationhub-config/2019-06-30/examples-1.json create mode 100644 models/apis/migrationhub-config/2019-06-30/paginators-1.json create mode 100644 models/apis/sesv2/2019-09-27/api-2.json create mode 100644 models/apis/sesv2/2019-09-27/docs-2.json create mode 100644 models/apis/sesv2/2019-09-27/examples-1.json create mode 100644 models/apis/sesv2/2019-09-27/paginators-1.json create mode 100644 service/chime/api_op_BatchCreateAttendee.go create mode 100644 service/chime/api_op_BatchCreateRoomMembership.go create mode 100644 service/chime/api_op_CreateAttendee.go create mode 100644 service/chime/api_op_CreateMeeting.go create mode 100644 service/chime/api_op_CreateRoom.go create mode 100644 service/chime/api_op_CreateRoomMembership.go create mode 100644 service/chime/api_op_DeleteAttendee.go create mode 100644 service/chime/api_op_DeleteMeeting.go create mode 100644 service/chime/api_op_DeleteRoom.go create mode 100644 service/chime/api_op_DeleteRoomMembership.go create mode 100644 service/chime/api_op_GetAttendee.go create mode 100644 service/chime/api_op_GetMeeting.go create mode 100644 service/chime/api_op_GetRoom.go create mode 100644 service/chime/api_op_ListAttendees.go create mode 100644 service/chime/api_op_ListMeetings.go create mode 100644 service/chime/api_op_ListRoomMemberships.go create mode 100644 service/chime/api_op_ListRooms.go create mode 100644 service/chime/api_op_UpdateRoom.go create mode 100644 service/chime/api_op_UpdateRoomMembership.go create mode 100644 service/cloudformation/api_op_DeregisterType.go create mode 100644 service/cloudformation/api_op_DescribeType.go create mode 100644 service/cloudformation/api_op_DescribeTypeRegistration.go create mode 100644 service/cloudformation/api_op_DetectStackSetDrift.go create mode 100644 service/cloudformation/api_op_ListTypeRegistrations.go create mode 100644 service/cloudformation/api_op_ListTypeVersions.go create mode 100644 service/cloudformation/api_op_ListTypes.go create mode 100644 
service/cloudformation/api_op_RecordHandlerProgress.go create mode 100644 service/cloudformation/api_op_RegisterType.go create mode 100644 service/cloudformation/api_op_SetTypeDefaultVersion.go create mode 100644 service/cloudsearch/api_integ_test.go create mode 100644 service/cloudsearch/api_op_DescribeDomainEndpointOptions.go create mode 100644 service/cloudsearch/api_op_UpdateDomainEndpointOptions.go create mode 100644 service/cloudtrail/api_op_GetInsightSelectors.go create mode 100644 service/cloudtrail/api_op_PutInsightSelectors.go create mode 100644 service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go create mode 100644 service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go create mode 100644 service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go create mode 100644 service/codecommit/api_op_CreateApprovalRuleTemplate.go create mode 100644 service/codecommit/api_op_CreatePullRequestApprovalRule.go create mode 100644 service/codecommit/api_op_DeleteApprovalRuleTemplate.go create mode 100644 service/codecommit/api_op_DeletePullRequestApprovalRule.go create mode 100644 service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go create mode 100644 service/codecommit/api_op_EvaluatePullRequestApprovalRules.go create mode 100644 service/codecommit/api_op_GetApprovalRuleTemplate.go create mode 100644 service/codecommit/api_op_GetPullRequestApprovalStates.go create mode 100644 service/codecommit/api_op_GetPullRequestOverrideState.go create mode 100644 service/codecommit/api_op_ListApprovalRuleTemplates.go create mode 100644 service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go create mode 100644 service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go create mode 100644 service/codecommit/api_op_OverridePullRequestApprovalRules.go create mode 100644 service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go create mode 100644 
service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go create mode 100644 service/codecommit/api_op_UpdateApprovalRuleTemplateName.go create mode 100644 service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go create mode 100644 service/codecommit/api_op_UpdatePullRequestApprovalState.go create mode 100644 service/configservice/api_op_DeleteConformancePack.go create mode 100644 service/configservice/api_op_DeleteOrganizationConformancePack.go create mode 100644 service/configservice/api_op_DescribeConformancePackCompliance.go create mode 100644 service/configservice/api_op_DescribeConformancePackStatus.go create mode 100644 service/configservice/api_op_DescribeConformancePacks.go create mode 100644 service/configservice/api_op_DescribeOrganizationConformancePackStatuses.go create mode 100644 service/configservice/api_op_DescribeOrganizationConformancePacks.go create mode 100644 service/configservice/api_op_GetConformancePackComplianceDetails.go create mode 100644 service/configservice/api_op_GetConformancePackComplianceSummary.go create mode 100644 service/configservice/api_op_GetOrganizationConformancePackDetailedStatus.go create mode 100644 service/configservice/api_op_PutConformancePack.go create mode 100644 service/configservice/api_op_PutOrganizationConformancePack.go create mode 100644 service/connect/api_op_ListTagsForResource.go create mode 100644 service/connect/api_op_TagResource.go create mode 100644 service/connect/api_op_UntagResource.go create mode 100644 service/dataexchange/api_client.go create mode 100644 service/dataexchange/api_doc.go create mode 100644 service/dataexchange/api_enums.go create mode 100644 service/dataexchange/api_errors.go create mode 100644 service/dataexchange/api_op_CancelJob.go create mode 100644 service/dataexchange/api_op_CreateDataSet.go create mode 100644 service/dataexchange/api_op_CreateJob.go create mode 100644 service/dataexchange/api_op_CreateRevision.go create mode 100644 
service/dataexchange/api_op_DeleteAsset.go create mode 100644 service/dataexchange/api_op_DeleteDataSet.go create mode 100644 service/dataexchange/api_op_DeleteRevision.go create mode 100644 service/dataexchange/api_op_GetAsset.go create mode 100644 service/dataexchange/api_op_GetDataSet.go create mode 100644 service/dataexchange/api_op_GetJob.go create mode 100644 service/dataexchange/api_op_GetRevision.go create mode 100644 service/dataexchange/api_op_ListDataSetRevisions.go create mode 100644 service/dataexchange/api_op_ListDataSets.go create mode 100644 service/dataexchange/api_op_ListJobs.go create mode 100644 service/dataexchange/api_op_ListRevisionAssets.go create mode 100644 service/dataexchange/api_op_ListTagsForResource.go create mode 100644 service/dataexchange/api_op_StartJob.go create mode 100644 service/dataexchange/api_op_TagResource.go create mode 100644 service/dataexchange/api_op_UntagResource.go create mode 100644 service/dataexchange/api_op_UpdateAsset.go create mode 100644 service/dataexchange/api_op_UpdateDataSet.go create mode 100644 service/dataexchange/api_op_UpdateRevision.go create mode 100644 service/dataexchange/api_types.go create mode 100644 service/dataexchange/dataexchangeiface/interface.go create mode 100644 service/dlm/api_op_ListTagsForResource.go create mode 100644 service/dlm/api_op_TagResource.go create mode 100644 service/dlm/api_op_UntagResource.go create mode 100644 service/ec2/api_op_DescribeFastSnapshotRestores.go create mode 100644 service/ec2/api_op_DisableFastSnapshotRestores.go create mode 100644 service/ec2/api_op_EnableFastSnapshotRestores.go create mode 100644 service/ec2/api_op_ModifyInstanceMetadataOptions.go create mode 100644 service/eks/api_op_CreateNodegroup.go create mode 100644 service/eks/api_op_DeleteNodegroup.go create mode 100644 service/eks/api_op_DescribeNodegroup.go create mode 100644 service/eks/api_op_ListNodegroups.go create mode 100644 service/eks/api_op_UpdateNodegroupConfig.go create mode 
100644 service/eks/api_op_UpdateNodegroupVersion.go create mode 100644 service/guardduty/api_op_CreatePublishingDestination.go create mode 100644 service/guardduty/api_op_DeletePublishingDestination.go create mode 100644 service/guardduty/api_op_DescribePublishingDestination.go create mode 100644 service/guardduty/api_op_ListPublishingDestinations.go create mode 100644 service/guardduty/api_op_UpdatePublishingDestination.go create mode 100644 service/iot/api_op_ConfirmTopicRuleDestination.go create mode 100644 service/iot/api_op_CreateTopicRuleDestination.go create mode 100644 service/iot/api_op_DeleteTopicRuleDestination.go create mode 100644 service/iot/api_op_GetCardinality.go create mode 100644 service/iot/api_op_GetPercentiles.go create mode 100644 service/iot/api_op_GetTopicRuleDestination.go create mode 100644 service/iot/api_op_ListTopicRuleDestinations.go create mode 100644 service/iot/api_op_UpdateTopicRuleDestination.go create mode 100644 service/migrationhubconfig/api_client.go create mode 100644 service/migrationhubconfig/api_doc.go create mode 100644 service/migrationhubconfig/api_enums.go create mode 100644 service/migrationhubconfig/api_errors.go create mode 100644 service/migrationhubconfig/api_op_CreateHomeRegionControl.go create mode 100644 service/migrationhubconfig/api_op_DescribeHomeRegionControls.go create mode 100644 service/migrationhubconfig/api_op_GetHomeRegion.go create mode 100644 service/migrationhubconfig/api_types.go create mode 100644 service/migrationhubconfig/migrationhubconfigiface/interface.go create mode 100644 service/personalize/api_op_CreateBatchInferenceJob.go create mode 100644 service/personalize/api_op_DescribeBatchInferenceJob.go create mode 100644 service/personalize/api_op_ListBatchInferenceJobs.go create mode 100644 service/pinpoint/api_op_CreateVoiceTemplate.go create mode 100644 service/pinpoint/api_op_DeleteVoiceTemplate.go create mode 100644 service/pinpoint/api_op_GetVoiceTemplate.go create mode 100644 
service/pinpoint/api_op_UpdateVoiceTemplate.go create mode 100644 service/quicksight/api_op_CancelIngestion.go create mode 100644 service/quicksight/api_op_CreateDashboard.go create mode 100644 service/quicksight/api_op_CreateDataSet.go create mode 100644 service/quicksight/api_op_CreateDataSource.go create mode 100644 service/quicksight/api_op_CreateIAMPolicyAssignment.go create mode 100644 service/quicksight/api_op_CreateIngestion.go create mode 100644 service/quicksight/api_op_CreateTemplate.go create mode 100644 service/quicksight/api_op_CreateTemplateAlias.go create mode 100644 service/quicksight/api_op_DeleteDashboard.go create mode 100644 service/quicksight/api_op_DeleteDataSet.go create mode 100644 service/quicksight/api_op_DeleteDataSource.go create mode 100644 service/quicksight/api_op_DeleteIAMPolicyAssignment.go create mode 100644 service/quicksight/api_op_DeleteTemplate.go create mode 100644 service/quicksight/api_op_DeleteTemplateAlias.go create mode 100644 service/quicksight/api_op_DescribeDashboard.go create mode 100644 service/quicksight/api_op_DescribeDashboardPermissions.go create mode 100644 service/quicksight/api_op_DescribeDataSet.go create mode 100644 service/quicksight/api_op_DescribeDataSetPermissions.go create mode 100644 service/quicksight/api_op_DescribeDataSource.go create mode 100644 service/quicksight/api_op_DescribeDataSourcePermissions.go create mode 100644 service/quicksight/api_op_DescribeIAMPolicyAssignment.go create mode 100644 service/quicksight/api_op_DescribeIngestion.go create mode 100644 service/quicksight/api_op_DescribeTemplate.go create mode 100644 service/quicksight/api_op_DescribeTemplateAlias.go create mode 100644 service/quicksight/api_op_DescribeTemplatePermissions.go create mode 100644 service/quicksight/api_op_ListDashboardVersions.go create mode 100644 service/quicksight/api_op_ListDashboards.go create mode 100644 service/quicksight/api_op_ListDataSets.go create mode 100644 
service/quicksight/api_op_ListDataSources.go create mode 100644 service/quicksight/api_op_ListIAMPolicyAssignments.go create mode 100644 service/quicksight/api_op_ListIAMPolicyAssignmentsForUser.go create mode 100644 service/quicksight/api_op_ListIngestions.go create mode 100644 service/quicksight/api_op_ListTagsForResource.go create mode 100644 service/quicksight/api_op_ListTemplateAliases.go create mode 100644 service/quicksight/api_op_ListTemplateVersions.go create mode 100644 service/quicksight/api_op_ListTemplates.go create mode 100644 service/quicksight/api_op_TagResource.go create mode 100644 service/quicksight/api_op_UntagResource.go create mode 100644 service/quicksight/api_op_UpdateDashboard.go create mode 100644 service/quicksight/api_op_UpdateDashboardPermissions.go create mode 100644 service/quicksight/api_op_UpdateDashboardPublishedVersion.go create mode 100644 service/quicksight/api_op_UpdateDataSet.go create mode 100644 service/quicksight/api_op_UpdateDataSetPermissions.go create mode 100644 service/quicksight/api_op_UpdateDataSource.go create mode 100644 service/quicksight/api_op_UpdateDataSourcePermissions.go create mode 100644 service/quicksight/api_op_UpdateIAMPolicyAssignment.go create mode 100644 service/quicksight/api_op_UpdateTemplate.go create mode 100644 service/quicksight/api_op_UpdateTemplateAlias.go create mode 100644 service/quicksight/api_op_UpdateTemplatePermissions.go create mode 100644 service/sesv2/api_client.go create mode 100644 service/sesv2/api_doc.go create mode 100644 service/sesv2/api_enums.go create mode 100644 service/sesv2/api_errors.go create mode 100644 service/sesv2/api_op_CreateConfigurationSet.go create mode 100644 service/sesv2/api_op_CreateConfigurationSetEventDestination.go create mode 100644 service/sesv2/api_op_CreateDedicatedIpPool.go create mode 100644 service/sesv2/api_op_CreateDeliverabilityTestReport.go create mode 100644 service/sesv2/api_op_CreateEmailIdentity.go create mode 100644 
service/sesv2/api_op_DeleteConfigurationSet.go create mode 100644 service/sesv2/api_op_DeleteConfigurationSetEventDestination.go create mode 100644 service/sesv2/api_op_DeleteDedicatedIpPool.go create mode 100644 service/sesv2/api_op_DeleteEmailIdentity.go create mode 100644 service/sesv2/api_op_GetAccount.go create mode 100644 service/sesv2/api_op_GetBlacklistReports.go create mode 100644 service/sesv2/api_op_GetConfigurationSet.go create mode 100644 service/sesv2/api_op_GetConfigurationSetEventDestinations.go create mode 100644 service/sesv2/api_op_GetDedicatedIp.go create mode 100644 service/sesv2/api_op_GetDedicatedIps.go create mode 100644 service/sesv2/api_op_GetDeliverabilityDashboardOptions.go create mode 100644 service/sesv2/api_op_GetDeliverabilityTestReport.go create mode 100644 service/sesv2/api_op_GetDomainDeliverabilityCampaign.go create mode 100644 service/sesv2/api_op_GetDomainStatisticsReport.go create mode 100644 service/sesv2/api_op_GetEmailIdentity.go create mode 100644 service/sesv2/api_op_ListConfigurationSets.go create mode 100644 service/sesv2/api_op_ListDedicatedIpPools.go create mode 100644 service/sesv2/api_op_ListDeliverabilityTestReports.go create mode 100644 service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go create mode 100644 service/sesv2/api_op_ListEmailIdentities.go create mode 100644 service/sesv2/api_op_ListTagsForResource.go create mode 100644 service/sesv2/api_op_PutAccountDedicatedIpWarmupAttributes.go create mode 100644 service/sesv2/api_op_PutAccountSendingAttributes.go create mode 100644 service/sesv2/api_op_PutConfigurationSetDeliveryOptions.go create mode 100644 service/sesv2/api_op_PutConfigurationSetReputationOptions.go create mode 100644 service/sesv2/api_op_PutConfigurationSetSendingOptions.go create mode 100644 service/sesv2/api_op_PutConfigurationSetTrackingOptions.go create mode 100644 service/sesv2/api_op_PutDedicatedIpInPool.go create mode 100644 service/sesv2/api_op_PutDedicatedIpWarmupAttributes.go 
create mode 100644 service/sesv2/api_op_PutDeliverabilityDashboardOption.go create mode 100644 service/sesv2/api_op_PutEmailIdentityDkimAttributes.go create mode 100644 service/sesv2/api_op_PutEmailIdentityFeedbackAttributes.go create mode 100644 service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go create mode 100644 service/sesv2/api_op_SendEmail.go create mode 100644 service/sesv2/api_op_TagResource.go create mode 100644 service/sesv2/api_op_UntagResource.go create mode 100644 service/sesv2/api_op_UpdateConfigurationSetEventDestination.go create mode 100644 service/sesv2/api_types.go create mode 100644 service/sesv2/sesv2iface/interface.go create mode 100644 service/storagegateway/api_op_DescribeAvailabilityMonitorTest.go create mode 100644 service/storagegateway/api_op_StartAvailabilityMonitorTest.go create mode 100644 service/workspaces/api_op_DeregisterWorkspaceDirectory.go create mode 100644 service/workspaces/api_op_ModifySelfservicePermissions.go create mode 100644 service/workspaces/api_op_ModifyWorkspaceAccessProperties.go create mode 100644 service/workspaces/api_op_ModifyWorkspaceCreationProperties.go create mode 100644 service/workspaces/api_op_RegisterWorkspaceDirectory.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b71a9e02db..180172c6012 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v0.17.0 (2019-11-20) +=== + +Services +--- +* Synced the V2 SDK with latest AWS service API definitions. 
+ +SDK Enhancements +--- +* SDK minimum version requirement has been updated to Go 1.12 ([#432](https://github.com/aws/aws-sdk-go-v2/pull/432)) + Release v0.16.0 (2019-11-12) === diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 080c6c3a961..cb7c688ed3e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -6,7 +6,6 @@ SDK Features SDK Enhancements --- -* SDK minimum version requirement has been updated to Go 1.12 ([#432](https://github.com/aws/aws-sdk-go-v2/pull/432)) SDK Bugs --- diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index c1536fdc42e..5d1234684d6 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -1116,6 +1116,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "datapipeline": service{ Endpoints: endpoints{ @@ -1706,11 +1722,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2817,6 +2838,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": 
endpoint{}, @@ -3513,6 +3538,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3522,26 +3551,11 @@ var awsPartition = partition{ "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ - SSLCommonName: "shield.ca-central-1.amazonaws.com", + SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, }, }, "sms": service{ @@ -4243,6 +4257,12 @@ var awscnPartition = partition{ Unresolveable: boxedTrue, }, }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -4630,6 +4650,12 @@ var awscnPartition = partition{ }, }, }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, }, } diff --git a/aws/version.go b/aws/version.go index 0935f02b2c9..bb797c5604b 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "0.16.0" +const SDKVersion = "0.17.0" diff --git a/internal/awstesting/cmd/op_crawler/create_service.go b/internal/awstesting/cmd/op_crawler/create_service.go index ecc177f443b..fb984b7064f 100644 --- a/internal/awstesting/cmd/op_crawler/create_service.go +++ 
b/internal/awstesting/cmd/op_crawler/create_service.go @@ -55,6 +55,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/costandusagereportservice" "github.com/aws/aws-sdk-go-v2/service/costexplorer" "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" "github.com/aws/aws-sdk-go-v2/service/datapipeline" "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/aws-sdk-go-v2/service/dax" @@ -133,6 +134,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/mediastoredata" "github.com/aws/aws-sdk-go-v2/service/mediatailor" "github.com/aws/aws-sdk-go-v2/service/migrationhub" + "github.com/aws/aws-sdk-go-v2/service/migrationhubconfig" "github.com/aws/aws-sdk-go-v2/service/mobile" "github.com/aws/aws-sdk-go-v2/service/mobileanalytics" "github.com/aws/aws-sdk-go-v2/service/mq" @@ -176,6 +178,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/servicediscovery" "github.com/aws/aws-sdk-go-v2/service/servicequotas" "github.com/aws/aws-sdk-go-v2/service/ses" + "github.com/aws/aws-sdk-go-v2/service/sesv2" "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/aws/aws-sdk-go-v2/service/shield" "github.com/aws/aws-sdk-go-v2/service/signer" @@ -265,6 +268,7 @@ func createServices(cfg aws.Config) []service { {name: "costandusagereportservice", value: reflect.ValueOf(costandusagereportservice.New(cfg))}, {name: "costexplorer", value: reflect.ValueOf(costexplorer.New(cfg))}, {name: "databasemigrationservice", value: reflect.ValueOf(databasemigrationservice.New(cfg))}, + {name: "dataexchange", value: reflect.ValueOf(dataexchange.New(cfg))}, {name: "datapipeline", value: reflect.ValueOf(datapipeline.New(cfg))}, {name: "datasync", value: reflect.ValueOf(datasync.New(cfg))}, {name: "dax", value: reflect.ValueOf(dax.New(cfg))}, @@ -343,6 +347,7 @@ func 
createServices(cfg aws.Config) []service { {name: "mediastoredata", value: reflect.ValueOf(mediastoredata.New(cfg))}, {name: "mediatailor", value: reflect.ValueOf(mediatailor.New(cfg))}, {name: "migrationhub", value: reflect.ValueOf(migrationhub.New(cfg))}, + {name: "migrationhubconfig", value: reflect.ValueOf(migrationhubconfig.New(cfg))}, {name: "mobile", value: reflect.ValueOf(mobile.New(cfg))}, {name: "mobileanalytics", value: reflect.ValueOf(mobileanalytics.New(cfg))}, {name: "mq", value: reflect.ValueOf(mq.New(cfg))}, @@ -386,6 +391,7 @@ func createServices(cfg aws.Config) []service { {name: "servicediscovery", value: reflect.ValueOf(servicediscovery.New(cfg))}, {name: "servicequotas", value: reflect.ValueOf(servicequotas.New(cfg))}, {name: "ses", value: reflect.ValueOf(ses.New(cfg))}, + {name: "sesv2", value: reflect.ValueOf(sesv2.New(cfg))}, {name: "sfn", value: reflect.ValueOf(sfn.New(cfg))}, {name: "shield", value: reflect.ValueOf(shield.New(cfg))}, {name: "signer", value: reflect.ValueOf(signer.New(cfg))}, diff --git a/models/apis/AWSMigrationHub/2017-05-31/api-2.json b/models/apis/AWSMigrationHub/2017-05-31/api-2.json index 4c1d52efc85..3eb3a228df7 100644 --- a/models/apis/AWSMigrationHub/2017-05-31/api-2.json +++ b/models/apis/AWSMigrationHub/2017-05-31/api-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Migration Hub", + "serviceId":"Migration Hub", "signatureVersion":"v4", "targetPrefix":"AWSMigrationHub", "uid":"AWSMigrationHub-2017-05-31" @@ -26,7 +27,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "AssociateDiscoveredResource":{ @@ -45,7 +47,8 @@ {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + 
{"shape":"HomeRegionNotSetException"} ] }, "CreateProgressUpdateStream":{ @@ -62,7 +65,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, - {"shape":"InvalidInputException"} + {"shape":"InvalidInputException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DeleteProgressUpdateStream":{ @@ -80,7 +84,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeApplicationState":{ @@ -97,7 +102,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeMigrationTask":{ @@ -113,7 +119,8 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DisassociateCreatedArtifact":{ @@ -131,7 +138,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DisassociateDiscoveredResource":{ @@ -149,7 +157,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ImportMigrationTask":{ @@ -167,7 +176,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, 
"ListCreatedArtifacts":{ @@ -183,7 +193,8 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListDiscoveredResources":{ @@ -199,7 +210,8 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListMigrationTasks":{ @@ -216,7 +228,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListProgressUpdateStreams":{ @@ -231,7 +244,8 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, - {"shape":"InvalidInputException"} + {"shape":"InvalidInputException"}, + {"shape":"HomeRegionNotSetException"} ] }, "NotifyApplicationState":{ @@ -250,7 +264,8 @@ {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "NotifyMigrationTaskState":{ @@ -268,7 +283,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "PutResourceAttributes":{ @@ -286,7 +302,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] } }, @@ -497,6 +514,13 @@ "exception":true }, 
"ErrorMessage":{"type":"string"}, + "HomeRegionNotSetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "ImportMigrationTaskRequest":{ "type":"structure", "required":[ @@ -666,6 +690,7 @@ "members":{ "ApplicationId":{"shape":"ApplicationId"}, "Status":{"shape":"ApplicationStatus"}, + "UpdateDateTime":{"shape":"UpdateDateTime"}, "DryRun":{"shape":"DryRun"} } }, diff --git a/models/apis/AWSMigrationHub/2017-05-31/docs-2.json b/models/apis/AWSMigrationHub/2017-05-31/docs-2.json index 5cb5869d564..a085c8ea343 100644 --- a/models/apis/AWSMigrationHub/2017-05-31/docs-2.json +++ b/models/apis/AWSMigrationHub/2017-05-31/docs-2.json @@ -1,15 +1,15 @@ { "version": "2.0", - "service": "
The AWS Migration Hub API methods help to obtain server and application migration status and integrate your resource-specific migration tool by providing a programmatic interface to Migration Hub.
", + "service": "The AWS Migration Hub API methods help to obtain server and application migration status and integrate your resource-specific migration tool by providing a programmatic interface to Migration Hub.
Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException
error will be returned. Also, you must make the API calls while in your home region.
Associates a created artifact of an AWS cloud resource, the target receiving the migration, with the migration task performed by a migration tool. This API has the following traits:
Migration tools can call the AssociateCreatedArtifact
operation to indicate which AWS artifact is associated with a migration task.
The created artifact name must be provided in ARN (Amazon Resource Name) format which will contain information about type and region; for example: arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b
.
Examples of the AWS resource behind the created artifact are, AMI's, EC2 instance, or DMS endpoint, etc.
Associates a discovered resource ID from Application Discovery Service (ADS) with a migration task.
", + "AssociateDiscoveredResource": "Associates a discovered resource ID from Application Discovery Service with a migration task.
", "CreateProgressUpdateStream": "Creates a progress update stream which is an AWS resource used for access control as well as a namespace for migration task names that is implicitly linked to your AWS account. It must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.
", - "DeleteProgressUpdateStream": "Deletes a progress update stream, including all of its tasks, which was previously created as an AWS resource used for access control. This API has the following traits:
The only parameter needed for DeleteProgressUpdateStream
is the stream name (same as a CreateProgressUpdateStream
call).
The call will return, and a background process will asynchronously delete the stream and all of its resources (tasks, associated resources, resource attributes, created artifacts).
If the stream takes time to be deleted, it might still show up on a ListProgressUpdateStreams
call.
CreateProgressUpdateStream
, ImportMigrationTask
, NotifyMigrationTaskState
, and all Associate[*] APIs realted to the tasks belonging to the stream will throw \"InvalidInputException\" if the stream of the same name is in the process of being deleted.
Once the stream and all of its resources are deleted, CreateProgressUpdateStream
for a stream of the same name will succeed, and that stream will be an entirely new logical resource (without any resources associated with the old stream).
Deletes a progress update stream, including all of its tasks, which was previously created as an AWS resource used for access control. This API has the following traits:
The only parameter needed for DeleteProgressUpdateStream
is the stream name (same as a CreateProgressUpdateStream
call).
The call will return, and a background process will asynchronously delete the stream and all of its resources (tasks, associated resources, resource attributes, created artifacts).
If the stream takes time to be deleted, it might still show up on a ListProgressUpdateStreams
call.
CreateProgressUpdateStream
, ImportMigrationTask
, NotifyMigrationTaskState
, and all Associate[*] APIs related to the tasks belonging to the stream will throw \"InvalidInputException\" if the stream of the same name is in the process of being deleted.
Once the stream and all of its resources are deleted, CreateProgressUpdateStream
for a stream of the same name will succeed, and that stream will be an entirely new logical resource (without any resources associated with the old stream).
Gets the migration status of an application.
", "DescribeMigrationTask": "Retrieves a list of all attributes associated with a specific migration task.
", "DisassociateCreatedArtifact": "Disassociates a created artifact of an AWS resource with a migration task performed by a migration tool that was previously associated. This API has the following traits:
A migration user can call the DisassociateCreatedArtifacts
operation to disassociate a created AWS Artifact from a migration task.
The created artifact name must be provided in ARN (Amazon Resource Name) format which will contain information about type and region; for example: arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b
.
Examples of the AWS resource behind the created artifact are, AMI's, EC2 instance, or RDS instance, etc.
Disassociate an Application Discovery Service (ADS) discovered resource from a migration task.
", + "DisassociateDiscoveredResource": "Disassociate an Application Discovery Service discovered resource from a migration task.
", "ImportMigrationTask": "Registers a new migration task which represents a server, database, etc., being migrated to AWS by a migration tool.
This API is a prerequisite to calling the NotifyMigrationTaskState
API as the migration tool must first register the migration task with Migration Hub.
Lists the created artifacts attached to a given migration task in an update stream. This API has the following traits:
Gets the list of the created artifacts while migration is taking place.
Shows the artifacts created by the migration tool that was associated by the AssociateCreatedArtifact
API.
Lists created artifacts in a paginated interface.
Lists discovered resources associated with the given MigrationTask
.
Lists progress update streams associated with the user account making this call.
", "NotifyApplicationState": "Sets the migration state of an application. For a given application identified by the value passed to ApplicationId
, its status is set or updated by passing one of three values to Status
: NOT_STARTED | IN_PROGRESS | COMPLETED
.
Notifies Migration Hub of the current status, progress, or other detail regarding a migration task. This API has the following traits:
Migration tools will call the NotifyMigrationTaskState
API to share the latest progress and status.
MigrationTaskName
is used for addressing updates to the correct target.
ProgressUpdateStream
is used for access control and to provide a namespace for each migration tool.
Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service (ADS)'s repository. This association occurs asynchronously after PutResourceAttributes
returns.
Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overiding the MAC address.
Note the instructions regarding the special use case of the ResourceAttributeList
parameter when specifying any \"VM\" related value.
Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources
.
Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service repository. This association occurs asynchronously after PutResourceAttributes
returns.
Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overriding the MAC address.
Note the instructions regarding the special use case of the ResourceAttributeList
parameter when specifying any \"VM\" related value.
Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources
.
The configurationId in ADS that uniquely identifies the grouped application.
", - "NotifyApplicationStateRequest$ApplicationId": "The configurationId in ADS that uniquely identifies the grouped application.
" + "DescribeApplicationStateRequest$ApplicationId": "The configurationId in Application Discovery Service that uniquely identifies the grouped application.
", + "NotifyApplicationStateRequest$ApplicationId": "The configurationId in Application Discovery Service that uniquely identifies the grouped application.
" } }, "ApplicationStatus": { @@ -62,8 +62,8 @@ "ConfigurationId": { "base": null, "refs": { - "DisassociateDiscoveredResourceRequest$ConfigurationId": "ConfigurationId of the ADS resource to be disassociated.
", - "DiscoveredResource$ConfigurationId": "The configurationId in ADS that uniquely identifies the on-premise resource.
" + "DisassociateDiscoveredResourceRequest$ConfigurationId": "ConfigurationId of the Application Discovery Service resource to be disassociated.
", + "DiscoveredResource$ConfigurationId": "The configurationId in Application Discovery Service that uniquely identifies the on-premise resource.
" } }, "CreateProgressUpdateStreamRequest": { @@ -196,6 +196,7 @@ "refs": { "AccessDeniedException$Message": null, "DryRunOperation$Message": null, + "HomeRegionNotSetException$Message": null, "InternalServerError$Message": null, "InvalidInputException$Message": null, "PolicyErrorException$Message": null, @@ -204,6 +205,11 @@ "UnauthorizedOperation$Message": null } }, + "HomeRegionNotSetException": { + "base": "The home region is not set. Set the home region to continue.
", + "refs": { + } + }, "ImportMigrationTaskRequest": { "base": null, "refs": { @@ -215,7 +221,7 @@ } }, "InternalServerError": { - "base": "Exception raised when there is an internal, configuration, or dependency error encountered.
", + "base": "Exception raised when an internal, configuration, or dependency error is encountered.
", "refs": { } }, @@ -227,7 +233,7 @@ "LatestResourceAttributeList": { "base": null, "refs": { - "MigrationTask$ResourceAttributeList": "" + "MigrationTask$ResourceAttributeList": "Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository.
" } }, "ListCreatedArtifactsRequest": { @@ -298,18 +304,18 @@ "MigrationTaskName": { "base": null, "refs": { - "AssociateCreatedArtifactRequest$MigrationTaskName": "Unique identifier that references the migration task.
", - "AssociateDiscoveredResourceRequest$MigrationTaskName": "The identifier given to the MigrationTask.
", - "DescribeMigrationTaskRequest$MigrationTaskName": "The identifier given to the MigrationTask.
", - "DisassociateCreatedArtifactRequest$MigrationTaskName": "Unique identifier that references the migration task to be disassociated with the artifact.
", - "DisassociateDiscoveredResourceRequest$MigrationTaskName": "The identifier given to the MigrationTask.
", - "ImportMigrationTaskRequest$MigrationTaskName": "Unique identifier that references the migration task.
", - "ListCreatedArtifactsRequest$MigrationTaskName": "Unique identifier that references the migration task.
", - "ListDiscoveredResourcesRequest$MigrationTaskName": "The name of the MigrationTask.
", - "MigrationTask$MigrationTaskName": "Unique identifier that references the migration task.
", - "MigrationTaskSummary$MigrationTaskName": "Unique identifier that references the migration task.
", - "NotifyMigrationTaskStateRequest$MigrationTaskName": "Unique identifier that references the migration task.
", - "PutResourceAttributesRequest$MigrationTaskName": "Unique identifier that references the migration task.
" + "AssociateCreatedArtifactRequest$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
", + "AssociateDiscoveredResourceRequest$MigrationTaskName": "The identifier given to the MigrationTask. Do not store personal data in this field.
", + "DescribeMigrationTaskRequest$MigrationTaskName": "The identifier given to the MigrationTask. Do not store personal data in this field.
", + "DisassociateCreatedArtifactRequest$MigrationTaskName": "Unique identifier that references the migration task to be disassociated with the artifact. Do not store personal data in this field.
", + "DisassociateDiscoveredResourceRequest$MigrationTaskName": "The identifier given to the MigrationTask. Do not store personal data in this field.
", + "ImportMigrationTaskRequest$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
", + "ListCreatedArtifactsRequest$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
", + "ListDiscoveredResourcesRequest$MigrationTaskName": "The name of the MigrationTask. Do not store personal data in this field.
", + "MigrationTask$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
", + "MigrationTaskSummary$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
", + "NotifyMigrationTaskStateRequest$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
", + "PutResourceAttributesRequest$MigrationTaskName": "Unique identifier that references the migration task. Do not store personal data in this field.
" } }, "MigrationTaskSummary": { @@ -351,14 +357,14 @@ } }, "PolicyErrorException": { - "base": "Exception raised when there are problems accessing ADS (Application Discovery Service); most likely due to a misconfigured policy or the migrationhub-discovery
role is missing or not configured correctly.
Exception raised when there are problems accessing Application Discovery Service (Application Discovery Service); most likely due to a misconfigured policy or the migrationhub-discovery
role is missing or not configured correctly.
Indication of the percentage completion of the task.
", "Task$ProgressPercent": "Indication of the percentage completion of the task.
" } }, @@ -367,18 +373,18 @@ "refs": { "AssociateCreatedArtifactRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", "AssociateDiscoveredResourceRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", - "CreateProgressUpdateStreamRequest$ProgressUpdateStreamName": "The name of the ProgressUpdateStream.
", - "DeleteProgressUpdateStreamRequest$ProgressUpdateStreamName": "The name of the ProgressUpdateStream.
", + "CreateProgressUpdateStreamRequest$ProgressUpdateStreamName": "The name of the ProgressUpdateStream. Do not store personal data in this field.
", + "DeleteProgressUpdateStreamRequest$ProgressUpdateStreamName": "The name of the ProgressUpdateStream. Do not store personal data in this field.
", "DescribeMigrationTaskRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", "DisassociateCreatedArtifactRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", "DisassociateDiscoveredResourceRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", - "ImportMigrationTaskRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", + "ImportMigrationTaskRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream. >
", "ListCreatedArtifactsRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", "ListDiscoveredResourcesRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", "MigrationTask$ProgressUpdateStream": "A name that identifies the vendor of the migration tool being used.
", "MigrationTaskSummary$ProgressUpdateStream": "An AWS resource used for access control. It should uniquely identify the migration tool as it is used for all updates made by the tool.
", "NotifyMigrationTaskStateRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
", - "ProgressUpdateStreamSummary$ProgressUpdateStreamName": "The name of the ProgressUpdateStream.
", + "ProgressUpdateStreamSummary$ProgressUpdateStreamName": "The name of the ProgressUpdateStream. Do not store personal data in this field.
", "PutResourceAttributesRequest$ProgressUpdateStream": "The name of the ProgressUpdateStream.
" } }, @@ -414,7 +420,7 @@ "ResourceAttributeList": { "base": null, "refs": { - "PutResourceAttributesRequest$ResourceAttributeList": "Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service (ADS)'s repository.
Takes the object array of ResourceAttribute
where the Type
field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER
where the identifying value can be a string up to 256 characters.
If any \"VM\" related value is set for a ResourceAttribute
object, it is required that VM_MANAGER_ID
, as a minimum, is always set. If VM_MANAGER_ID
is not set, then all \"VM\" fields will be discarded and \"VM\" fields will not be used for matching the migration task to a server in Application Discovery Service (ADS)'s repository. See the Example section below for a use case of specifying \"VM\" related values.
If a server you are trying to match has multiple IP or MAC addresses, you should provide as many as you know in separate type/value pairs passed to the ResourceAttributeList
parameter to maximize the chances of matching.
Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository.
Takes the object array of ResourceAttribute
where the Type
field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER
where the identifying value can be a string up to 256 characters.
If any \"VM\" related value is set for a ResourceAttribute
object, it is required that VM_MANAGER_ID
, as a minimum, is always set. If VM_MANAGER_ID
is not set, then all \"VM\" fields will be discarded and \"VM\" fields will not be used for matching the migration task to a server in Application Discovery Service repository. See the Example section below for a use case of specifying \"VM\" related values.
If a server you are trying to match has multiple IP or MAC addresses, you should provide as many as you know in separate type/value pairs passed to the ResourceAttributeList
parameter to maximize the chances of matching.
Exception raised when the request references a resource (ADS configuration, update stream, migration task, etc.) that does not exist in ADS (Application Discovery Service) or in Migration Hub's repository.
", + "base": "Exception raised when the request references a resource (Application Discovery Service configuration, update stream, migration task, etc.) that does not exist in Application Discovery Service (Application Discovery Service) or in Migration Hub's repository.
", "refs": { } }, @@ -490,6 +496,7 @@ "DescribeApplicationStateResult$LastUpdatedTime": "The timestamp when the application status was last updated.
", "MigrationTask$UpdateDateTime": "The timestamp when the task was gathered.
", "MigrationTaskSummary$UpdateDateTime": "The timestamp when the task was gathered.
", + "NotifyApplicationStateRequest$UpdateDateTime": "The timestamp when the application state changed.
", "NotifyMigrationTaskStateRequest$UpdateDateTime": "The timestamp when the task was gathered.
" } } diff --git a/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json b/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json index 5677bd8e4a2..2d785c8af28 100644 --- a/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json +++ b/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json @@ -1,4 +1,28 @@ { "pagination": { + "ListCreatedArtifacts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CreatedArtifactList" + }, + "ListDiscoveredResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DiscoveredResourceList" + }, + "ListMigrationTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MigrationTaskSummaryList" + }, + "ListProgressUpdateStreams": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ProgressUpdateStreamSummaryList" + } } -} +} \ No newline at end of file diff --git a/models/apis/autoscaling/2011-01-01/api-2.json b/models/apis/autoscaling/2011-01-01/api-2.json index bb8a88957bd..512f6095f2a 100644 --- a/models/apis/autoscaling/2011-01-01/api-2.json +++ b/models/apis/autoscaling/2011-01-01/api-2.json @@ -939,7 +939,8 @@ "Tags":{"shape":"TagDescriptionList"}, "TerminationPolicies":{"shape":"TerminationPolicies"}, "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, - "ServiceLinkedRoleARN":{"shape":"ResourceName"} + "ServiceLinkedRoleARN":{"shape":"ResourceName"}, + "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"} } }, "AutoScalingGroupDesiredCapacity":{"type":"integer"}, @@ -981,13 +982,15 @@ ], "members":{ "InstanceId":{"shape":"XmlStringMaxLen19"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, "LifecycleState":{"shape":"XmlStringMaxLen32"}, 
"HealthStatus":{"shape":"XmlStringMaxLen32"}, "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, "LaunchTemplate":{"shape":"LaunchTemplateSpecification"}, - "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + "ProtectedFromScaleIn":{"shape":"InstanceProtected"}, + "WeightedCapacity":{"shape":"XmlStringMaxLen32"} } }, "AutoScalingInstances":{ @@ -1128,7 +1131,8 @@ "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, "LifecycleHookSpecificationList":{"shape":"LifecycleHookSpecifications"}, "Tags":{"shape":"Tags"}, - "ServiceLinkedRoleARN":{"shape":"ResourceName"} + "ServiceLinkedRoleARN":{"shape":"ResourceName"}, + "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"} } }, "CreateLaunchConfigurationType":{ @@ -1564,12 +1568,14 @@ ], "members":{ "InstanceId":{"shape":"XmlStringMaxLen19"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, "LifecycleState":{"shape":"LifecycleState"}, "HealthStatus":{"shape":"XmlStringMaxLen32"}, "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, "LaunchTemplate":{"shape":"LaunchTemplateSpecification"}, - "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + "ProtectedFromScaleIn":{"shape":"InstanceProtected"}, + "WeightedCapacity":{"shape":"XmlStringMaxLen32"} } }, "InstanceIds":{ @@ -1687,7 +1693,8 @@ "LaunchTemplateOverrides":{ "type":"structure", "members":{ - "InstanceType":{"shape":"XmlStringMaxLen255"} + "InstanceType":{"shape":"XmlStringMaxLen255"}, + "WeightedCapacity":{"shape":"XmlStringMaxLen32"} } }, "LaunchTemplateSpecification":{ @@ -1804,6 +1811,7 @@ "type":"list", "member":{"shape":"LoadBalancerTargetGroupState"} }, + "MaxInstanceLifetime":{"type":"integer"}, "MaxNumberOfAutoScalingGroups":{"type":"integer"}, "MaxNumberOfLaunchConfigurations":{"type":"integer"}, "MaxRecords":{"type":"integer"}, @@ -2387,7 +2395,8 @@ "VPCZoneIdentifier":{"shape":"XmlStringMaxLen2047"}, "TerminationPolicies":{"shape":"TerminationPolicies"}, 
"NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, - "ServiceLinkedRoleARN":{"shape":"ResourceName"} + "ServiceLinkedRoleARN":{"shape":"ResourceName"}, + "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"} } }, "Values":{ diff --git a/models/apis/autoscaling/2011-01-01/docs-2.json b/models/apis/autoscaling/2011-01-01/docs-2.json index 99b7bf6b277..cf99a3c32be 100644 --- a/models/apis/autoscaling/2011-01-01/docs-2.json +++ b/models/apis/autoscaling/2011-01-01/docs-2.json @@ -3,7 +3,7 @@ "service": "Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing.
For more information, including information about granting IAM users required permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto Scaling User Guide.
", "operations": { "AttachInstances": "Attaches one or more EC2 instances to the specified Auto Scaling group.
When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.
If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.
For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", - "AttachLoadBalancerTargetGroups": "Attaches one or more target groups to the specified Auto Scaling group.
To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.
With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", + "AttachLoadBalancerTargetGroups": "Attaches one or more target groups to the specified Auto Scaling group.
To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.
With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", "AttachLoadBalancers": "Attaches one or more Classic Load Balancers to the specified Auto Scaling group.
To attach an Application Load Balancer or a Network Load Balancer instead, see AttachLoadBalancerTargetGroups.
To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.
For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", "BatchDeleteScheduledAction": "Deletes one or more scheduled actions for the specified Auto Scaling group.
", "BatchPutScheduledUpdateGroupAction": "Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.
", @@ -33,7 +33,7 @@ "DescribePolicies": "Describes the policies for the specified Auto Scaling group.
", "DescribeScalingActivities": "Describes one or more scaling activities for the specified Auto Scaling group.
", "DescribeScalingProcessTypes": "Describes the scaling process types for use with ResumeProcesses and SuspendProcesses.
", - "DescribeScheduledActions": "Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.
", + "DescribeScheduledActions": "Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.
", "DescribeTags": "Describes the specified tags.
You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.
You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.
", "DescribeTerminationPolicyTypes": "Describes the termination policies supported by Amazon EC2 Auto Scaling.
For more information, see Controlling Which Auto Scaling Instances Terminate During Scale In in the Amazon EC2 Auto Scaling User Guide.
", "DetachInstances": "Removes one or more instances from the specified Auto Scaling group.
After the instances are detached, you can manage them independent of the Auto Scaling group.
If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.
If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.
For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", @@ -41,9 +41,9 @@ "DetachLoadBalancers": "Detaches one or more Classic Load Balancers from the specified Auto Scaling group.
This operation detaches only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use DetachLoadBalancerTargetGroups instead.
When you detach a load balancer, it enters the Removing
state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers. The instances remain running.
Disables group metrics for the specified Auto Scaling group.
", "EnableMetricsCollection": "Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring Your Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.
", - "EnterStandby": "Moves the specified instances into the standby state.
For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", + "EnterStandby": "Moves the specified instances into the standby state.
If you choose to decrement the desired capacity of the Auto Scaling group, the instances can enter standby as long as the desired capacity of the Auto Scaling group after the instances are placed into standby is equal to or greater than the minimum capacity of the group.
If you choose not to decrement the desired capacity of the Auto Scaling group, the Auto Scaling group launches new instances to replace the instances on standby.
For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", "ExecutePolicy": "Executes the specified policy.
", - "ExitStandby": "Moves the specified instances out of the standby state.
For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", + "ExitStandby": "Moves the specified instances out of the standby state.
After you put the instances back in service, the desired capacity is incremented.
For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
", "PutLifecycleHook": "Creates or updates a lifecycle hook for the specified Auto Scaling group.
A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.
If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.
For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.
If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.
", "PutNotificationConfiguration": "Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.
This configuration overwrites any existing configuration.
For more information, see Getting Amazon SNS Notifications When Your Auto Scaling Group Scales in the Amazon EC2 Auto Scaling User Guide.
", "PutScalingPolicy": "Creates or updates a scaling policy for an Auto Scaling group. To update an existing scaling policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.
For more information about using scaling policies to scale your Auto Scaling group automatically, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.
", @@ -55,7 +55,7 @@ "SetInstanceProtection": "Updates the instance protection settings of the specified instances.
For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.
", "SuspendProcesses": "Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.
If you suspend either the Launch
or Terminate
process types, it can prevent other process types from functioning properly.
To resume processes that have been suspended, use ResumeProcesses.
For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.
", "TerminateInstanceInAutoScalingGroup": "Terminates the specified instance and optionally adjusts the desired group size.
This call simply makes a termination request. The instance is not terminated immediately.
", - "UpdateAutoScalingGroup": "Updates the configuration for the specified Auto Scaling group.
To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.
If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.
Note the following about changing DesiredCapacity
, MaxSize
, or MinSize
:
If a scale-in event occurs as a result of a new DesiredCapacity
value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.
If you specify a new value for MinSize
without specifying a value for DesiredCapacity
, and the new MinSize
is larger than the current size of the group, this sets the group's DesiredCapacity
to the new MinSize
value.
If you specify a new value for MaxSize
without specifying a value for DesiredCapacity
, and the new MaxSize
is smaller than the current size of the group, this sets the group's DesiredCapacity
to the new MaxSize
value.
To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.
" + "UpdateAutoScalingGroup": "Updates the configuration for the specified Auto Scaling group.
To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns.
If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.
Note the following about changing DesiredCapacity
, MaxSize
, or MinSize
:
If a scale-in event occurs as a result of a new DesiredCapacity
value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.
If you specify a new value for MinSize
without specifying a value for DesiredCapacity
, and the new MinSize
is larger than the current size of the group, this sets the group's DesiredCapacity
to the new MinSize
value.
If you specify a new value for MaxSize
without specifying a value for DesiredCapacity
, and the new MaxSize
is smaller than the current size of the group, this sets the group's DesiredCapacity
to the new MaxSize
value.
To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.
" }, "shapes": { "Activities": { @@ -136,7 +136,7 @@ "base": null, "refs": { "CreateLaunchConfigurationType$AssociatePublicIpAddress": "For Auto Scaling groups that are running in a virtual private cloud (VPC), specifies whether to assign a public IP address to the group's instances. If you specify true
, each instance in the Auto Scaling group receives a unique public IP address. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.
If you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier
when you create your group.
If the instance is launched into a default subnet, the default is to assign a public IP address, unless you disabled the option to assign a public IP address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address, unless you enabled the option to assign a public IP address on the subnet.
For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances.
For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.
" + "LaunchConfiguration$AssociatePublicIpAddress": "For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances.
For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.
" } }, "AttachInstancesQuery": { @@ -207,7 +207,7 @@ "AutoScalingGroupNames": { "base": null, "refs": { - "AutoScalingGroupNamesType$AutoScalingGroupNames": "The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords
parameter.
If you omit this parameter, all Auto Scaling groups are described.
", + "AutoScalingGroupNamesType$AutoScalingGroupNames": "The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords
parameter.
If you omit this parameter, all Auto Scaling groups are described.
", "DescribeNotificationConfigurationsType$AutoScalingGroupNames": "The name of the Auto Scaling group.
" } }, @@ -289,7 +289,7 @@ "BlockDeviceEbsEncrypted": { "base": null, "refs": { - "Ebs$Encrypted": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.
If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.
Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.
For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.
" + "Ebs$Encrypted": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.
If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.
Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.
For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.
" } }, "BlockDeviceEbsIops": { @@ -301,13 +301,13 @@ "BlockDeviceEbsVolumeSize": { "base": null, "refs": { - "Ebs$VolumeSize": "The volume size, in Gibibytes (GiB).
This can be a number from 1-1,024 for standard
, 4-16,384 for io1
, 1-16,384 for gp2
, and 500-16,384 for st1
and sc1
. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.
Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.
At least one of VolumeSize or SnapshotId is required.
The volume size, in Gibibytes (GiB).
This can be a number from 1-1,024 for standard
, 4-16,384 for io1
, 1-16,384 for gp2
, and 500-16,384 for st1
and sc1
. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.
Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.
At least one of VolumeSize or SnapshotId is required.
The volume type, which can be standard
for Magnetic, io1
for Provisioned IOPS SSD, gp2
for General Purpose SSD, st1
for Throughput Optimized HDD, or sc1
for Cold HDD. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.
Valid values: standard
| io1
| gp2
| st1
| sc1
The volume type, which can be standard
for Magnetic, io1
for Provisioned IOPS SSD, gp2
for General Purpose SSD, st1
for Throughput Optimized HDD, or sc1
for Cold HDD. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.
Valid Values: standard
| io1
| gp2
| st1
| sc1
The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
If you specify the ClassicLinkVPCId
parameter, you must specify this parameter.
The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId
.
For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
" + "CreateLaunchConfigurationType$ClassicLinkVPCSecurityGroups": "The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
If you specify the ClassicLinkVPCId
parameter, you must specify this parameter.
The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId
.
For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
" } }, "CompleteLifecycleActionAnswer": { @@ -366,7 +366,7 @@ } }, "CustomizedMetricSpecification": { - "base": "Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.
To create your customized metric specification:
Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics in the Amazon CloudWatch User Guide.
Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. That is, the value of the metric should decrease when capacity increases.
For more information about CloudWatch, see Amazon CloudWatch Concepts.
", + "base": "Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.
To create your customized metric specification:
Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics in the Amazon CloudWatch User Guide.
Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. That is, the value of the metric should decrease when capacity increases.
For more information about CloudWatch, see Amazon CloudWatch Concepts.
", "refs": { "TargetTrackingConfiguration$CustomizedMetricSpecification": "A customized metric. You must specify either a predefined metric or a customized metric.
" } @@ -551,8 +551,8 @@ "EbsOptimized": { "base": null, "refs": { - "CreateLaunchConfigurationType$EbsOptimized": "Specifies whether the launch configuration is optimized for EBS I/O (true
) or not (false
). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.
The default value is false
.
Specifies whether the launch configuration is optimized for EBS I/O (true
) or not (false
).
For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.
" + "CreateLaunchConfigurationType$EbsOptimized": "Specifies whether the launch configuration is optimized for EBS I/O (true
) or not (false
). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.
The default value is false
.
Specifies whether the launch configuration is optimized for EBS I/O (true
) or not (false
).
For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.
" } }, "EnableMetricsCollectionQuery": { @@ -613,7 +613,7 @@ "FailedScheduledUpdateGroupActionRequests": { "base": null, "refs": { - "BatchDeleteScheduledActionAnswer$FailedScheduledActions": "The names of the scheduled actions that could not be deleted, including an error message.
", + "BatchDeleteScheduledActionAnswer$FailedScheduledActions": "The names of the scheduled actions that could not be deleted, including an error message.
", "BatchPutScheduledUpdateGroupActionAnswer$FailedScheduledUpdateGroupActions": "The names of the scheduled actions that could not be created or updated, including an error message.
" } }, @@ -684,8 +684,8 @@ "InstanceMonitoring": { "base": "Describes whether detailed monitoring is enabled for the Auto Scaling instances.
", "refs": { - "CreateLaunchConfigurationType$InstanceMonitoring": "Controls whether instances in this group are launched with detailed (true
) or basic (false
) monitoring.
The default value is true
(enabled).
When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.
Controls whether instances in this group are launched with detailed (true
) or basic (false
) monitoring.
For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.
" + "CreateLaunchConfigurationType$InstanceMonitoring": "Controls whether instances in this group are launched with detailed (true
) or basic (false
) monitoring.
The default value is true
(enabled).
When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.
Controls whether instances in this group are launched with detailed (true
) or basic (false
) monitoring.
For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.
" } }, "InstanceProtected": { @@ -705,9 +705,9 @@ } }, "InstancesDistribution": { - "base": "Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.
The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.
", + "base": "Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.
The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.
When you update SpotAllocationStrategy
, SpotInstancePools
, or SpotMaxPrice
, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
The instances distribution to use.
If you leave this parameter unspecified when creating a mixed instances policy, the default values are used.
" + "MixedInstancesPolicy$InstancesDistribution": "The instances distribution to use.
If you leave this parameter unspecified, the value for each parameter in InstancesDistribution
uses a default value.
Describes a launch template and overrides.
The overrides are used to override the instance type specified by the launch template with multiple instance types that can be used to launch On-Demand Instances and Spot Instances.
", + "base": "Describes a launch template and overrides.
The overrides are used to override the instance type specified by the launch template with multiple instance types that can be used to launch On-Demand Instances and Spot Instances.
When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
", "refs": { "MixedInstancesPolicy$LaunchTemplate": "The launch template and instance types (overrides).
This parameter must be specified when creating a mixed instances policy.
" } @@ -761,7 +761,7 @@ } }, "LaunchTemplateOverrides": { - "base": "Describes an override for a launch template.
", + "base": "Describes an override for a launch template.
", "refs": { "Overrides$member": null } @@ -773,7 +773,7 @@ "AutoScalingInstanceDetails$LaunchTemplate": "The launch template for the instance.
", "CreateAutoScalingGroupType$LaunchTemplate": "The launch template to use to launch instances.
For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.
If you do not specify LaunchTemplate
, you must specify one of the following parameters: InstanceId
, LaunchConfigurationName
, or MixedInstancesPolicy
.
The launch template for the instance.
", - "LaunchTemplate$LaunchTemplateSpecification": "The launch template to use. You must specify either the launch template ID or launch template name in the request.
", + "LaunchTemplate$LaunchTemplateSpecification": "The launch template to use. You must specify either the launch template ID or launch template name in the request.
", "UpdateAutoScalingGroupType$LaunchTemplate": "The launch template and version to use to specify the updates. If you specify LaunchTemplate
in your update request, you can't specify LaunchConfigurationName
or MixedInstancesPolicy
.
For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.
" } }, @@ -875,6 +875,14 @@ "DescribeLoadBalancerTargetGroupsResponse$LoadBalancerTargetGroups": "Information about the target groups.
" } }, + "MaxInstanceLifetime": { + "base": null, + "refs": { + "AutoScalingGroup$MaxInstanceLifetime": "The maximum amount of time, in seconds, that an instance can be in service.
Valid Range: Minimum value of 604800.
", + "CreateAutoScalingGroupType$MaxInstanceLifetime": "The maximum amount of time, in seconds, that an instance can be in service.
Valid Range: Minimum value of 604800.
", + "UpdateAutoScalingGroupType$MaxInstanceLifetime": "The maximum amount of time, in seconds, that an instance can be in service.
Valid Range: Minimum value of 604800.
" + } + }, "MaxNumberOfAutoScalingGroups": { "base": null, "refs": { @@ -890,7 +898,7 @@ "MaxRecords": { "base": null, "refs": { - "AutoScalingGroupNamesType$MaxRecords": "The maximum number of items to return with this call. The default value is 50
and the maximum value is 100
.
The maximum number of items to return with this call. The default value is 50
and the maximum value is 100
.
The maximum number of items to return with this call. The default value is 50
and the maximum value is 50
.
The maximum number of items to return with this call. The default value is 100
and the maximum value is 100
.
The maximum number of items to return with this call. The default value is 100
and the maximum value is 100
.
The metric type.
" + "PredefinedMetricSpecification$PredefinedMetricType": "The metric type. The following predefined metrics are available:
ASGAverageCPUUtilization
- Average CPU utilization of the Auto Scaling group.
ASGAverageNetworkIn
- Average number of bytes received on all network interfaces by the Auto Scaling group.
ASGAverageNetworkOut
- Average number of bytes sent out on all network interfaces by the Auto Scaling group.
ALBRequestCountPerTarget
- Number of requests completed per target in an Application Load Balancer target group.
Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.
You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy
as the top-level parameter instead of a launch configuration or template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.
The mixed instances policy for the group.
", + "AutoScalingGroup$MixedInstancesPolicy": "The mixed instances policy for the group.
", "CreateAutoScalingGroupType$MixedInstancesPolicy": "An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.
The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity, but also the parameters that specify the instance configuration information—the launch template and instance types.
For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.
You must specify one of the following parameters in your request: LaunchConfigurationName
, LaunchTemplate
, InstanceId
, or MixedInstancesPolicy
.
An embedded object that specifies a mixed instances policy.
In your call to UpdateAutoScalingGroup
, you can make changes to the policy that is specified. All optional parameters are left unchanged if not specified.
For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.
" } @@ -1071,19 +1079,19 @@ "OnDemandBaseCapacity": { "base": null, "refs": { - "InstancesDistribution$OnDemandBaseCapacity": "The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
The default value is 0
. If you leave this parameter set to 0
, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity
setting.
The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity
setting.
An update to this setting means a gradual replacement of instances to maintain the specified number of On-Demand Instances for your base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.
Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity
. The range is 0–100.
The default value is 100
. If you leave this parameter set to 100
, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.
Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity
.
Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.
An update to this setting means a gradual replacement of instances to maintain the percentage of On-Demand Instances for your additional capacity above the base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.
Valid Range: Minimum value of 0. Maximum value of 100.
" } }, "Overrides": { "base": null, "refs": { - "LaunchTemplate$Overrides": "Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You must specify between 2 and 20 overrides.
" + "LaunchTemplate$Overrides": "An optional setting. Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.
" } }, "PoliciesType": { @@ -1099,7 +1107,7 @@ "PolicyIncrement": { "base": null, "refs": { - "PutScalingPolicyType$ScalingAdjustment": "The amount by which a simple scaling policy scales the Auto Scaling group in response to an alarm breach. The adjustment is based on the value that you specified in the AdjustmentType
parameter (either an absolute number or a percentage). A positive value adds to the current capacity and a negative value subtracts from the current capacity. For exact capacity, you must specify a positive value.
Conditional: If you specify SimpleScaling
for the policy type, you must specify this parameter. (Not used with any other policy type.)
The amount by which a simple scaling policy scales the Auto Scaling group in response to an alarm breach. The adjustment is based on the value that you specified in the AdjustmentType
parameter (either an absolute number or a percentage). A positive value adds to the current capacity and a negative value subtracts from the current capacity. For exact capacity, you must specify a positive value.
Conditional: If you specify SimpleScaling
for the policy type, you must specify this parameter. (Not used with any other policy type.)
The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.
", "StepAdjustment$ScalingAdjustment": "The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.
" } @@ -1272,7 +1280,7 @@ "SetDesiredCapacityType$AutoScalingGroupName": "The name of the Auto Scaling group.
", "SetInstanceProtectionQuery$AutoScalingGroupName": "The name of the Auto Scaling group.
", "UpdateAutoScalingGroupType$AutoScalingGroupName": "The name of the Auto Scaling group.
", - "UpdateAutoScalingGroupType$LaunchConfigurationName": "The name of the launch configuration. If you specify LaunchConfigurationName
in your update request, you can't specify LaunchTemplate
or MixedInstancesPolicy
.
To update an Auto Scaling group with a launch configuration with InstanceMonitoring
set to false
, you must first disable the collection of group metrics. Otherwise, you get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.
The name of the launch configuration. If you specify LaunchConfigurationName
in your update request, you can't specify LaunchTemplate
or MixedInstancesPolicy
.
The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. For more information, see Service-Linked Roles in the Amazon EC2 Auto Scaling User Guide.
" } }, @@ -1317,13 +1325,13 @@ } }, "ScheduledUpdateGroupAction": { - "base": "Describes a scheduled scaling action. Used in response to DescribeScheduledActions.
", + "base": "Describes a scheduled scaling action. Used in response to DescribeScheduledActions.
", "refs": { "ScheduledUpdateGroupActions$member": null } }, "ScheduledUpdateGroupActionRequest": { - "base": "Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction.
When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.
", + "base": "Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction.
When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.
", "refs": { "ScheduledUpdateGroupActionRequests$member": null } @@ -1331,7 +1339,7 @@ "ScheduledUpdateGroupActionRequests": { "base": null, "refs": { - "BatchPutScheduledUpdateGroupActionType$ScheduledUpdateGroupActions": "One or more scheduled actions. The maximum number allowed is 50.
" + "BatchPutScheduledUpdateGroupActionType$ScheduledUpdateGroupActions": "One or more scheduled actions. The maximum number allowed is 50.
" } }, "ScheduledUpdateGroupActions": { @@ -1344,7 +1352,7 @@ "base": null, "refs": { "CreateLaunchConfigurationType$SecurityGroups": "A list that contains the security groups to assign to the instances in the Auto Scaling group.
[EC2-VPC] Specify the security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.
[EC2-Classic] Specify either the security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon EC2 User Guide for Linux Instances.
", - "LaunchConfiguration$SecurityGroups": "A list that contains the security groups to assign to the instances in the Auto Scaling group.
For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.
" + "LaunchConfiguration$SecurityGroups": "A list that contains the security groups to assign to the instances in the Auto Scaling group.
For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.
" } }, "ServiceLinkedRoleFailure": { @@ -1389,14 +1397,14 @@ "SpotInstancePools": { "base": null, "refs": { - "InstancesDistribution$SpotInstancePools": "The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate. The range is 1–20. The default value is 2
.
Valid only when the Spot allocation strategy is lowest-price
.
The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2.
Used only when the Spot allocation strategy is lowest-price
.
Valid Range: Minimum value of 1. Maximum value of 20.
" } }, "SpotPrice": { "base": null, "refs": { - "CreateLaunchConfigurationType$SpotPrice": "The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
If a Spot price is set, then the Auto Scaling group will only launch instances when the Spot price has been met, regardless of the setting in the Auto Scaling group's DesiredCapacity
.
When you change your Spot price by creating a new launch configuration, running instances will continue to run as long as the Spot price for those running instances is higher than the current Spot market price.
The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price.
For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
" + "CreateLaunchConfigurationType$SpotPrice": "The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
When you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price.
The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price.
For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
" } }, "StepAdjustment": { @@ -1511,7 +1519,7 @@ "LaunchConfiguration$CreatedTime": "The creation date and time for the launch configuration.
", "PutScheduledUpdateGroupActionType$Time": "This parameter is no longer used.
", "PutScheduledUpdateGroupActionType$StartTime": "The date and time for this action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, \"2019-06-01T00:00:00Z\"
).
If you specify Recurrence
and StartTime
, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.
If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.
", - "PutScheduledUpdateGroupActionType$EndTime": "The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.
", + "PutScheduledUpdateGroupActionType$EndTime": "The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.
", "ScheduledUpdateGroupAction$Time": "This parameter is no longer used.
", "ScheduledUpdateGroupAction$StartTime": "The date and time in UTC for this action to start. For example, \"2019-06-01T00:00:00Z\"
.
The date and time in UTC for the recurring schedule to end. For example, \"2019-06-01T00:00:00Z\"
.
The token for the next set of items to return. (You received this token from a previous call.)
", "FailedScheduledUpdateGroupActionRequest$ErrorMessage": "The error message accompanying the error code.
", "Filter$Name": "The name of the filter. The valid values are: \"auto-scaling-group\"
, \"key\"
, \"value\"
, and \"propagate-at-launch\"
.
Indicates how to allocate instance types to fulfill On-Demand capacity.
The only valid value is prioritized
, which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.
Indicates how to allocate instances across Spot Instance pools.
If the allocation strategy is lowest-price
, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized
, the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price
. The default Spot allocation strategy for the AWS Management Console is capacity-optimized
.
Valid values: lowest-price
| capacity-optimized
Indicates how to allocate instance types to fulfill On-Demand capacity.
The only valid value is prioritized
, which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.
Indicates how to allocate instances across Spot Instance pools.
If the allocation strategy is lowest-price
, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized
, the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price
. The default Spot allocation strategy for the AWS Management Console is capacity-optimized
.
Valid values: lowest-price
| capacity-optimized
The token for the next set of items to return. (You received this token from a previous call.)
", "LaunchConfigurationsType$NextToken": "A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken
value when requesting the next set of items. This value is null when there are no more items to return.
A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken
value when requesting the next set of items. This value is null when there are no more items to return.
The reason the activity began.
", "LifecycleHook$NotificationMetadata": "Additional information that is included any time Amazon EC2 Auto Scaling sends a message to the notification target.
", "LifecycleHookSpecification$NotificationMetadata": "Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.
", - "PredefinedMetricSpecification$ResourceLabel": "Identifies the resource associated with the metric type. The following predefined metrics are available:
ASGAverageCPUUtilization
- Average CPU utilization of the Auto Scaling group.
ASGAverageNetworkIn
- Average number of bytes received on all network interfaces by the Auto Scaling group.
ASGAverageNetworkOut
- Average number of bytes sent out on all network interfaces by the Auto Scaling group.
ALBRequestCountPerTarget
- Number of requests completed per target in an Application Load Balancer target group.
For predefined metric types ASGAverageCPUUtilization
, ASGAverageNetworkIn
, and ASGAverageNetworkOut
, the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget
, the parameter must be specified in the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id
, where app/load-balancer-name/load-balancer-id
is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id
is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.
Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget
and there is a target group attached to the Auto Scaling group.
The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id
, where
app/load-balancer-name/load-balancer-id
is the final portion of the load balancer ARN, and
targetgroup/target-group-name/target-group-id
is the final portion of the target group ARN.
Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.
" } }, "XmlStringMaxLen1600": { "base": null, "refs": { - "CreateLaunchConfigurationType$IamInstanceProfile": "The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.
For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.
", - "LaunchConfiguration$IamInstanceProfile": "The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.
For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.
", + "CreateLaunchConfigurationType$IamInstanceProfile": "The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.
For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.
", + "LaunchConfiguration$IamInstanceProfile": "The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.
For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.
", "TerminationPolicies$member": null } }, @@ -1621,6 +1629,7 @@ "AutoScalingGroup$LaunchConfigurationName": "The name of the associated launch configuration.
", "AutoScalingGroup$PlacementGroup": "The name of the placement group into which to launch your instances, if any.
", "AutoScalingGroup$Status": "The current state of the group when DeleteAutoScalingGroup is in progress.
", + "AutoScalingInstanceDetails$InstanceType": "The instance type of the EC2 instance.
", "AutoScalingInstanceDetails$AutoScalingGroupName": "The name of the Auto Scaling group for the instance.
", "AutoScalingInstanceDetails$AvailabilityZone": "The Availability Zone for the instance.
", "AutoScalingInstanceDetails$LaunchConfigurationName": "The launch configuration used to launch the instance. This value is not available if you attached the instance to the Auto Scaling group.
", @@ -1634,26 +1643,27 @@ "CreateLaunchConfigurationType$LaunchConfigurationName": "The name of the launch configuration. This name must be unique per Region per account.
", "CreateLaunchConfigurationType$ImageId": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.
If you do not specify InstanceId
, you must specify ImageId
.
The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.
", - "CreateLaunchConfigurationType$ClassicLinkVPCId": "The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
This parameter can only be used if you are launching EC2-Classic instances.
", + "CreateLaunchConfigurationType$ClassicLinkVPCId": "The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
This parameter can only be used if you are launching EC2-Classic instances.
", "CreateLaunchConfigurationType$InstanceType": "Specifies the instance type of the EC2 instance.
For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.
If you do not specify InstanceId
, you must specify InstanceType
.
The ID of the kernel associated with the AMI.
", "CreateLaunchConfigurationType$RamdiskId": "The ID of the RAM disk to select.
", - "Ebs$SnapshotId": "The snapshot ID of the volume to use.
Conditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId
and VolumeSize
, VolumeSize
must be equal or greater than the size of the snapshot.
The snapshot ID of the volume to use.
Conditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId
and VolumeSize
, VolumeSize
must be equal or greater than the size of the snapshot.
The granularity to associate with the metrics to collect. The only valid value is 1Minute
.
One of the following metrics:
GroupMinSize
GroupMaxSize
GroupDesiredCapacity
GroupInServiceInstances
GroupPendingInstances
GroupStandbyInstances
GroupTerminatingInstances
GroupTotalInstances
The granularity of the metric. The only valid value is 1Minute
.
The name of the scheduled action.
", + "Instance$InstanceType": "The instance type of the EC2 instance.
", "Instance$AvailabilityZone": "The Availability Zone in which the instance is running.
", "Instance$LaunchConfigurationName": "The launch configuration associated with the instance.
", "InvalidNextToken$message": "", "LaunchConfiguration$LaunchConfigurationName": "The name of the launch configuration.
", - "LaunchConfiguration$ImageId": "The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.
For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.
", + "LaunchConfiguration$ImageId": "The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.
For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.
", "LaunchConfiguration$KeyName": "The name of the key pair.
For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.
", - "LaunchConfiguration$ClassicLinkVPCId": "The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
", + "LaunchConfiguration$ClassicLinkVPCId": "The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.
", "LaunchConfiguration$InstanceType": "The instance type for the instances.
For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.
", "LaunchConfiguration$KernelId": "The ID of the kernel associated with the AMI.
", "LaunchConfiguration$RamdiskId": "The ID of the RAM disk associated with the AMI.
", - "LaunchTemplateOverrides$InstanceType": "The instance type.
For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.
", + "LaunchTemplateOverrides$InstanceType": "The instance type.
For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.
", "LaunchTemplateSpecification$LaunchTemplateId": "The ID of the launch template. You must specify either a template ID or a template name.
", "LaunchTemplateSpecification$Version": "The version number, $Latest
, or $Default
. If the value is $Latest
, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default
, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default
.
The adjustment type, which specifies how ScalingAdjustment
is interpreted. The valid values are ChangeInCapacity
, ExactCapacity
, and PercentChangeInCapacity
.
The name of the Auto Scaling group.
", "ScheduledUpdateGroupAction$ScheduledActionName": "The name of the scheduled action.
", - "ScheduledUpdateGroupAction$Recurrence": "The recurring schedule for the action, in Unix cron syntax format.
When StartTime
and EndTime
are specified with Recurrence
, they form the boundaries of when the recurring action starts and stops.
The recurring schedule for the action, in Unix cron syntax format.
When StartTime
and EndTime
are specified with Recurrence
, they form the boundaries of when the recurring action starts and stops.
The name of the scaling action.
", "ScheduledUpdateGroupActionRequest$Recurrence": "The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"
). For more information about this format, see Crontab.
When StartTime
and EndTime
are specified with Recurrence
, they form the boundaries of when the recurring action starts and stops.
The service to use for the health checks. The valid values are EC2
and ELB
. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.
The lifecycle state for the instance.
", "AutoScalingInstanceDetails$HealthStatus": "The last reported health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Amazon EC2 Auto Scaling should terminate and replace it.
", + "AutoScalingInstanceDetails$WeightedCapacity": "The number of capacity units contributed by the instance based on its instance type.
Valid Range: Minimum value of 1. Maximum value of 999.
", "CreateAutoScalingGroupType$HealthCheckType": "The service to use for the health checks. The valid values are EC2
and ELB
. The default value is EC2
. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.
For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.
", "Instance$HealthStatus": "The last reported health status of the instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and that Amazon EC2 Auto Scaling should terminate and replace it.
", + "Instance$WeightedCapacity": "The number of capacity units contributed by the instance based on its instance type.
Valid Range: Minimum value of 1. Maximum value of 999.
", + "LaunchTemplateOverrides$WeightedCapacity": "The number of capacity units, which gives the instance type a proportional weight to other instance types. For example, larger instance types are generally weighted more than smaller instance types. These are the same units that you chose to set the desired capacity in terms of instances, or a performance attribute such as vCPUs, memory, or I/O.
Valid Range: Minimum value of 1. Maximum value of 999.
", "PutScalingPolicyType$MetricAggregationType": "The aggregation type for the CloudWatch metrics. The valid values are Minimum
, Maximum
, and Average
. If the aggregation type is null, the value is treated as Average
.
Valid only if the policy type is StepScaling
.
The aggregation type for the CloudWatch metrics. The valid values are Minimum
, Maximum
, and Average
.
The health status of the instance. Set to Healthy
to have the instance remain in service. Set to Unhealthy
to have the instance be out of service. Amazon EC2 Auto Scaling terminates and replaces the unhealthy instance.
The tenancy of the instance. An instance with dedicated
tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.
To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default
), you must set the value of this parameter to dedicated
.
If you specify PlacementTenancy
, you must specify at least one subnet for VPCZoneIdentifier
when you create your group.
For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.
Valid values: default
| dedicated
The tenancy of the instance. An instance with dedicated
tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.
To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default
), you must set the value of this parameter to dedicated
.
If you specify PlacementTenancy
, you must specify at least one subnet for VPCZoneIdentifier
when you create your group.
For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.
Valid Values: default
| dedicated
The error code.
", "LaunchConfiguration$PlacementTenancy": "The tenancy of the instance, either default
or dedicated
. An instance with dedicated
tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.
For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.
", "PolicyTypes$member": null, @@ -1724,7 +1737,7 @@ "base": null, "refs": { "CreateLaunchConfigurationType$UserData": "The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.
", - "LaunchConfiguration$UserData": "The Base64-encoded user data to make available to the launched EC2 instances.
For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.
" + "LaunchConfiguration$UserData": "The Base64-encoded user data to make available to the launched EC2 instances.
For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.
" } } } diff --git a/models/apis/ce/2017-10-25/api-2.json b/models/apis/ce/2017-10-25/api-2.json index 67364aedad8..0f9c312ab53 100644 --- a/models/apis/ce/2017-10-25/api-2.json +++ b/models/apis/ce/2017-10-25/api-2.json @@ -1192,6 +1192,7 @@ "CurrencyCode":{"shape":"GenericString"}, "EstimatedSPCost":{"shape":"GenericString"}, "EstimatedOnDemandCost":{"shape":"GenericString"}, + "EstimatedOnDemandCostWithCurrentCommitment":{"shape":"GenericString"}, "EstimatedSavingsAmount":{"shape":"GenericString"}, "EstimatedSavingsPercentage":{"shape":"GenericString"}, "HourlyCommitmentToPurchase":{"shape":"GenericString"}, @@ -1225,7 +1226,8 @@ "DailyCommitmentToPurchase":{"shape":"GenericString"}, "HourlyCommitmentToPurchase":{"shape":"GenericString"}, "EstimatedSavingsPercentage":{"shape":"GenericString"}, - "EstimatedMonthlySavingsAmount":{"shape":"GenericString"} + "EstimatedMonthlySavingsAmount":{"shape":"GenericString"}, + "EstimatedOnDemandCostWithCurrentCommitment":{"shape":"GenericString"} } }, "SavingsPlansSavings":{ diff --git a/models/apis/ce/2017-10-25/docs-2.json b/models/apis/ce/2017-10-25/docs-2.json index 8fb24b13f36..c2f399ba0fe 100644 --- a/models/apis/ce/2017-10-25/docs-2.json +++ b/models/apis/ce/2017-10-25/docs-2.json @@ -388,6 +388,7 @@ "SavingsPlansPurchaseRecommendationDetail$CurrencyCode": "The currency code Amazon Web Services used to generate the recommendations and present potential savings.
", "SavingsPlansPurchaseRecommendationDetail$EstimatedSPCost": "The cost of the recommended Savings Plans over the length of the lookback period.
", "SavingsPlansPurchaseRecommendationDetail$EstimatedOnDemandCost": "The remaining On-Demand cost estimated to not be covered by the recommended Savings Plans, over the length of the lookback period.
", + "SavingsPlansPurchaseRecommendationDetail$EstimatedOnDemandCostWithCurrentCommitment": "The estimated On-Demand costs you would expect with no additional commitment, based on your usage of the selected time period and the Savings Plans you own.
", "SavingsPlansPurchaseRecommendationDetail$EstimatedSavingsAmount": "The estimated savings amount based on the recommended Savings Plans over the length of the lookback period.
", "SavingsPlansPurchaseRecommendationDetail$EstimatedSavingsPercentage": "The estimated savings percentage relative to the total cost of applicable On-Demand usage over the lookback period.
", "SavingsPlansPurchaseRecommendationDetail$HourlyCommitmentToPurchase": "The recommended hourly commitment level for the Savings Plans type, and configuration based on the usage during the lookback period.
", @@ -408,6 +409,7 @@ "SavingsPlansPurchaseRecommendationSummary$HourlyCommitmentToPurchase": "The recommended hourly commitment based on the recommendation parameters.
", "SavingsPlansPurchaseRecommendationSummary$EstimatedSavingsPercentage": "The estimated savings relative to the total cost of On-Demand usage, over the lookback period. This is calculated as estimatedSavingsAmount
/ CurrentOnDemandSpend
*100.
The estimated monthly savings amount, based on the recommended Savings Plans purchase.
", + "SavingsPlansPurchaseRecommendationSummary$EstimatedOnDemandCostWithCurrentCommitment": "The estimated On-Demand costs you would expect with no additional commitment, based on your usage of the selected time period and the Savings Plans you own.
", "SavingsPlansSavings$NetSavings": "The savings amount that you are accumulating for the usage that is covered by a Savings Plans, when compared to the On-Demand equivalent of the same usage.
", "SavingsPlansSavings$OnDemandCostEquivalent": "How much the amount that the usage would have cost if it was accrued at the On-Demand rate.
", "SavingsPlansUtilization$TotalCommitment": "The total amount of Savings Plans commitment that's been purchased in an account (or set of accounts).
", @@ -564,7 +566,7 @@ "Granularity": { "base": null, "refs": { - "GetCostAndUsageRequest$Granularity": "Sets the AWS cost granularity to MONTHLY
or DAILY
, or HOURLY
. If Granularity
isn't set, the response object doesn't include the Granularity
, either MONTHLY
or DAILY
, or HOURLY
.
The GetCostAndUsageRequest
operation supports only DAILY
and MONTHLY
granularities.
Sets the AWS cost granularity to MONTHLY
or DAILY
, or HOURLY
. If Granularity
isn't set, the response object doesn't include the Granularity
, either MONTHLY
or DAILY
, or HOURLY
.
Sets the AWS cost granularity to MONTHLY
, DAILY
, or HOURLY
. If Granularity
isn't set, the response object doesn't include the Granularity
, MONTHLY
, DAILY
, or HOURLY
.
How granular you want the forecast to be. You can get 3 months of DAILY
forecasts or 12 months of MONTHLY
forecasts.
The GetCostForecast
operation supports only DAILY
and MONTHLY
granularities.
The granularity of the AWS cost data for the reservation. Valid values are MONTHLY
and DAILY
.
If GroupBy
is set, Granularity
can't be set. If Granularity
isn't set, the response object doesn't include Granularity
, either MONTHLY
or DAILY
.
The GetReservationCoverage
operation supports only DAILY
and MONTHLY
granularities.
You can group AWS costs using up to two different groups, either dimensions, tag keys, or both.
When you group by tag key, you get all tag values, including empty strings.
Valid values are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, LINKED_ACCOUNT
, OPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
, TAGS
, TENANCY
, and USAGE_TYPE
.
You can group AWS costs using up to two different groups, either dimensions, tag keys, or both.
When you group by tag key, you get all tag values, including empty strings.
Valid values are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, LINKED_ACCOUNT
, OPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
, TAGS
, TENANCY
, RECORD_TYPE
, and USAGE_TYPE
.
The groups that are specified by the Filter
or GroupBy
parameters in the request.
You can group Amazon Web Services costs using up to two different groups: either dimensions, tag keys, or both.
", "GetCostAndUsageWithResourcesResponse$GroupDefinitions": "The groups that are specified by the Filter
or GroupBy
parameters in the request.
The Amazon Chime API (application programming interface) is designed for administrators to use to perform key tasks, such as creating and managing Amazon Chime accounts and users. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes.
You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.
You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.
Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.
If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.
When making REST API calls, use the service name chime
and REST endpoint https://service.chime.aws.amazon.com
.
Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Control Access to the Amazon Chime Console in the Amazon Chime Administration Guide.
", + "service": "The Amazon Chime API (application programming interface) is designed for developers to perform key tasks, such as creating and managing Amazon Chime accounts, users, and Voice Connectors. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes. It also includes some server-side API actions to use with the Amazon Chime SDK. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.
You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.
Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.
If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.
When making REST API calls, use the service name chime
and REST endpoint https://service.chime.aws.amazon.com
.
Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Identity and Access Management for Amazon Chime in the Amazon Chime Administration Guide.
", "operations": { "AssociatePhoneNumberWithUser": "Associates a phone number with the specified Amazon Chime user.
", "AssociatePhoneNumbersWithVoiceConnector": "Associates phone numbers with the specified Amazon Chime Voice Connector.
", "AssociatePhoneNumbersWithVoiceConnectorGroup": "Associates phone numbers with the specified Amazon Chime Voice Connector group.
", + "BatchCreateAttendee": "Creates up to 100 new attendees for an active Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", + "BatchCreateRoomMembership": "Adds up to 50 members to a chat room. Members can be either users or bots. The member role designates whether the member is a chat room administrator or a general chat room member.
", "BatchDeletePhoneNumber": "Moves phone numbers into the Deletion queue. Phone numbers must be disassociated from any users or Amazon Chime Voice Connectors before they can be deleted.
Phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.
", "BatchSuspendUser": "Suspends up to 50 users from a Team
or EnterpriseLWA
Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.
Users suspended from a Team
account are disassociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team
account users, invite them to the Team
account again. You can use the InviteUsers action to do so.
Users suspended from an EnterpriseLWA
account are immediately signed out of Amazon Chime and can no longer sign in. To remove the suspension from suspended EnterpriseLWA
account users, use the BatchUnsuspendUser action.
To sign out users without suspending them, use the LogoutUser action.
", "BatchUnsuspendUser": "Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA
account. Only users on EnterpriseLWA
accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.
Previously suspended users who are unsuspended using this action are returned to Registered
status. Users who are not previously suspended are ignored.
Updates phone number product types or calling names. You can update one attribute at a time for each UpdatePhoneNumberRequestItem
. For example, you can update either the product type or the calling name.
For product types, choose from Amazon Chime Business Calling and Amazon Chime Voice Connector. For toll-free numbers, you must use the Amazon Chime Voice Connector product type.
Updates to outbound calling names can take up to 72 hours to complete. Pending updates to outbound calling names must be complete before you can request another update.
", "BatchUpdateUser": "Updates user details within the UpdateUserRequestItem object for up to 20 users for the specified Amazon Chime account. Currently, only LicenseType
updates are supported for this action.
Creates an Amazon Chime account under the administrator's AWS account. Only Team
account types are currently supported for this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.
Creates a new attendee for an active Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "CreateBot": "Creates a bot for an Amazon Chime Enterprise account.
", + "CreateMeeting": "Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "CreatePhoneNumberOrder": "Creates an order for phone numbers to be provisioned. Choose from Amazon Chime Business Calling and Amazon Chime Voice Connector product types. For toll-free numbers, you must use the Amazon Chime Voice Connector product type.
", + "CreateRoom": "Creates a chat room for the specified Amazon Chime account.
", + "CreateRoomMembership": "Adds a member to a chat room. A member can be either a user or a bot. The member role designates whether the member is a chat room administrator or a general chat room member.
", "CreateVoiceConnector": "Creates an Amazon Chime Voice Connector under the administrator's AWS account. You can choose to create an Amazon Chime Voice Connector in a specific AWS Region.
Enabling CreateVoiceConnectorRequest$RequireEncryption configures your Amazon Chime Voice Connector to use TLS transport for SIP signaling and Secure RTP (SRTP) for media. Inbound calls use TLS transport, and unencrypted outbound calls are blocked.
", "CreateVoiceConnectorGroup": "Creates an Amazon Chime Voice Connector group under the administrator's AWS account. You can associate up to three existing Amazon Chime Voice Connectors with the Amazon Chime Voice Connector group by including VoiceConnectorItems
in the request.
You can include Amazon Chime Voice Connectors from different AWS Regions in your group. This creates a fault tolerant mechanism for fallback in case of availability events.
", "DeleteAccount": "Deletes the specified Amazon Chime account. You must suspend all users before deleting a Team
account. You can use the BatchSuspendUser action to do so.
For EnterpriseLWA
and EnterpriseAD
accounts, you must release the claimed domains for your Amazon Chime account before deletion. As soon as you release the domain, all users under that account are suspended.
Deleted accounts appear in your Disabled
accounts list for 90 days. To restore a deleted account from your Disabled
accounts list, you must contact AWS Support.
After 90 days, deleted accounts are permanently removed from your Disabled
accounts list.
Deletes an attendee from the specified Amazon Chime SDK meeting and deletes their JoinToken
. Attendees are automatically deleted when an Amazon Chime SDK meeting is deleted. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
Deletes the events configuration that allows a bot to receive outgoing events.
", + "DeleteMeeting": "Deletes the specified Amazon Chime SDK meeting. When a meeting is deleted, its attendees are also deleted and clients can no longer join it. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "DeletePhoneNumber": "Moves the specified phone number into the Deletion queue. A phone number must be disassociated from any users or Amazon Chime Voice Connectors before it can be deleted.
Deleted phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.
", + "DeleteRoom": "Deletes a chat room.
", + "DeleteRoomMembership": "Removes a member from a chat room.
", "DeleteVoiceConnector": "Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the Amazon Chime Voice Connector must be disassociated from it before it can be deleted.
", "DeleteVoiceConnectorGroup": "Deletes the specified Amazon Chime Voice Connector group. Any VoiceConnectorItems
and phone numbers associated with the group must be removed before it can be deleted.
Deletes the origination settings for the specified Amazon Chime Voice Connector.
", @@ -29,26 +39,33 @@ "DisassociatePhoneNumbersFromVoiceConnectorGroup": "Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector group.
", "GetAccount": "Retrieves details for the specified Amazon Chime account, such as account type and supported licenses.
", "GetAccountSettings": "Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.
", + "GetAttendee": "Gets the Amazon Chime SDK attendee details for a specified meeting ID and attendee ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "GetBot": "Retrieves details for the specified bot, such as bot email address, bot type, status, and display name.
", "GetEventsConfiguration": "Gets details for an events configuration that allows a bot to receive outgoing events, such as an HTTPS endpoint or Lambda function ARN.
", "GetGlobalSettings": "Retrieves global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.
", + "GetMeeting": "Gets the Amazon Chime SDK meeting details for the specified meeting ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "GetPhoneNumber": "Retrieves details for the specified phone number ID, such as associations, capabilities, and product type.
", "GetPhoneNumberOrder": "Retrieves details for the specified phone number order, such as order creation timestamp, phone numbers in E.164 format, product type, and order status.
", "GetPhoneNumberSettings": "Retrieves the phone number settings for the administrator's AWS account, such as the default outbound calling name.
", + "GetRoom": "Retrieves room details, such as name.
", "GetUser": "Retrieves details for the specified user ID, such as primary email address, license type, and personal meeting PIN.
To retrieve user details with an email address instead of a user ID, use the ListUsers action, and then filter by email address.
", "GetUserSettings": "Retrieves settings for the specified user ID, such as any associated phone number settings.
", "GetVoiceConnector": "Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps, name, outbound host, and encryption requirements.
", "GetVoiceConnectorGroup": "Retrieves details for the specified Amazon Chime Voice Connector group, such as timestamps, name, and associated VoiceConnectorItems
.
Retrieves the logging configuration details for the specified Amazon Chime Voice Connector. Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.
", "GetVoiceConnectorOrigination": "Retrieves origination setting details for the specified Amazon Chime Voice Connector.
", - "GetVoiceConnectorStreamingConfiguration": "Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis, and shows the retention period for the Amazon Kinesis data, in hours.
", + "GetVoiceConnectorStreamingConfiguration": "Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis. It also shows the retention period, in hours, for the Amazon Kinesis data.
", "GetVoiceConnectorTermination": "Retrieves termination setting details for the specified Amazon Chime Voice Connector.
", "GetVoiceConnectorTerminationHealth": "Retrieves information about the last time a SIP OPTIONS
ping was received from your SIP infrastructure for the specified Amazon Chime Voice Connector.
Sends email invites to as many as 50 users, inviting them to the specified Amazon Chime Team
account. Only Team
account types are currently supported for this action.
Sends email to a maximum of 50 users, inviting them to the specified Amazon Chime Team
account. Only Team
account types are currently supported for this action.
Lists the Amazon Chime accounts under the administrator's AWS account. You can filter accounts by account name prefix. To find out which Amazon Chime account a user belongs to, you can filter by the user's email address, which returns one account result.
", + "ListAttendees": "Lists the attendees for the specified Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "ListBots": "Lists the bots associated with the administrator's Amazon Chime Enterprise account ID.
", + "ListMeetings": "Lists up to 100 active Amazon Chime SDK meetings. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
", "ListPhoneNumberOrders": "Lists the phone number orders for the administrator's Amazon Chime account.
", "ListPhoneNumbers": "Lists the phone numbers for the specified Amazon Chime account, Amazon Chime user, Amazon Chime Voice Connector, or Amazon Chime Voice Connector group.
", + "ListRoomMemberships": "Lists the membership details for the specified room, such as member IDs, member email addresses, and member names.
", + "ListRooms": "Lists the room details for the specified Amazon Chime account. Optionally, filter the results by a member ID (user ID or bot ID) to see a list of rooms that the member belongs to.
", "ListUsers": "Lists the users that belong to the specified Amazon Chime account. You can specify an email address to list only the user that the email address belongs to.
", "ListVoiceConnectorGroups": "Lists the Amazon Chime Voice Connector groups for the administrator's AWS account.
", "ListVoiceConnectorTerminationCredentials": "Lists the SIP credentials for the specified Amazon Chime Voice Connector.
", @@ -57,7 +74,7 @@ "PutEventsConfiguration": "Creates an events configuration that allows a bot to receive outgoing events sent by Amazon Chime. Choose either an HTTPS endpoint or a Lambda function ARN. For more information, see Bot.
", "PutVoiceConnectorLoggingConfiguration": "Adds a logging configuration for the specified Amazon Chime Voice Connector. The logging configuration specifies whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.
", "PutVoiceConnectorOrigination": "Adds origination settings for the specified Amazon Chime Voice Connector.
", - "PutVoiceConnectorStreamingConfiguration": "Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to Amazon Kinesis, and sets the retention period for the Amazon Kinesis data, in hours.
", + "PutVoiceConnectorStreamingConfiguration": "Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to Amazon Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data.
", "PutVoiceConnectorTermination": "Adds termination settings for the specified Amazon Chime Voice Connector.
", "PutVoiceConnectorTerminationCredentials": "Adds termination SIP credentials for the specified Amazon Chime Voice Connector.
", "RegenerateSecurityToken": "Regenerates the security token for a bot.
", @@ -69,7 +86,9 @@ "UpdateBot": "Updates the status of the specified bot, such as starting or stopping the bot from running in your Amazon Chime Enterprise account.
", "UpdateGlobalSettings": "Updates global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.
", "UpdatePhoneNumber": "Updates phone number details, such as product type or calling name, for the specified phone number ID. You can update one phone number detail at a time. For example, you can update either the product type or the calling name in one action.
For toll-free numbers, you must use the Amazon Chime Voice Connector product type.
Updates to outbound calling names can take up to 72 hours to complete. Pending updates to outbound calling names must be complete before you can request another update.
", - "UpdatePhoneNumberSettings": "Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to be updated.
", + "UpdatePhoneNumberSettings": "Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to update.
", + "UpdateRoom": "Updates room details, such as the room name.
", + "UpdateRoomMembership": "Updates room membership details, such as member role. The member role designates whether the member is a chat room administrator or a general chat room member. Member role can only be updated for user IDs.
", "UpdateUser": "Updates user details for a specified user ID. Currently, only LicenseType
updates are supported for this action.
Updates the settings for the specified user, such as phone number settings.
", "UpdateVoiceConnector": "Updates details for the specified Amazon Chime Voice Connector.
", @@ -117,6 +136,13 @@ "Account$AccountType": "The Amazon Chime account type. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.
" } }, + "Arn": { + "base": null, + "refs": { + "MeetingNotificationConfiguration$SnsTopicArn": "The SNS topic ARN.
", + "MeetingNotificationConfiguration$SqsQueueArn": "The SQS queue ARN.
" + } + }, "AssociatePhoneNumberWithUserRequest": { "base": null, "refs": { @@ -147,11 +173,52 @@ "refs": { } }, + "Attendee": { + "base": "An Amazon Chime SDK meeting attendee. Includes a unique AttendeeId
and JoinToken
. The JoinToken
allows a client to authenticate and join as the specified attendee. The JoinToken
expires when the meeting ends or when DeleteAttendee is called. After that, the attendee is unable to join the meeting.
We recommend securely transferring each JoinToken
from your server application to the client so that no other client has access to the token except for the one authorized to represent the attendee.
The attendee information, including attendee ID and join token.
", + "GetAttendeeResponse$Attendee": "The Amazon Chime SDK attendee information.
" + } + }, + "AttendeeList": { + "base": null, + "refs": { + "BatchCreateAttendeeResponse$Attendees": "The attendee information, including attendees IDs and join tokens.
", + "ListAttendeesResponse$Attendees": "The Amazon Chime SDK attendee information.
" + } + }, "BadRequestException": { "base": "The input parameters don't match the service's restrictions.
", "refs": { } }, + "BatchCreateAttendeeErrorList": { + "base": null, + "refs": { + "BatchCreateAttendeeResponse$Errors": "If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.
" + } + }, + "BatchCreateAttendeeRequest": { + "base": null, + "refs": { + } + }, + "BatchCreateAttendeeResponse": { + "base": null, + "refs": { + } + }, + "BatchCreateRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "BatchCreateRoomMembershipResponse": { + "base": null, + "refs": { + } + }, "BatchDeletePhoneNumberRequest": { "base": null, "refs": { @@ -276,6 +343,13 @@ "Termination$CallingRegions": "The countries to which calls are allowed, in ISO 3166-1 alpha-2 format. Required.
" } }, + "ClientRequestToken": { + "base": null, + "refs": { + "CreateMeetingRequest$ClientRequestToken": "The unique identifier for the client request. Use a different token for different meetings.
", + "CreateRoomRequest$ClientRequestToken": "The idempotency token for the request.
" + } + }, "ConflictException": { "base": "The request could not be processed because of conflict in the current state of the resource.
", "refs": { @@ -297,6 +371,34 @@ "refs": { } }, + "CreateAttendeeError": { + "base": "The list of errors returned when errors are encountered during the BatchCreateAttendee and CreateAttendee actions. This includes external user IDs, error codes, and error messages.
", + "refs": { + "BatchCreateAttendeeErrorList$member": null + } + }, + "CreateAttendeeRequest": { + "base": null, + "refs": { + } + }, + "CreateAttendeeRequestItem": { + "base": "The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee action.
", + "refs": { + "CreateAttendeeRequestItemList$member": null + } + }, + "CreateAttendeeRequestItemList": { + "base": null, + "refs": { + "BatchCreateAttendeeRequest$Attendees": "The request containing the attendees to create.
" + } + }, + "CreateAttendeeResponse": { + "base": null, + "refs": { + } + }, "CreateBotRequest": { "base": null, "refs": { @@ -307,6 +409,16 @@ "refs": { } }, + "CreateMeetingRequest": { + "base": null, + "refs": { + } + }, + "CreateMeetingResponse": { + "base": null, + "refs": { + } + }, "CreatePhoneNumberOrderRequest": { "base": null, "refs": { @@ -317,6 +429,26 @@ "refs": { } }, + "CreateRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "CreateRoomMembershipResponse": { + "base": null, + "refs": { + } + }, + "CreateRoomRequest": { + "base": null, + "refs": { + } + }, + "CreateRoomResponse": { + "base": null, + "refs": { + } + }, "CreateVoiceConnectorGroupRequest": { "base": null, "refs": { @@ -352,7 +484,7 @@ "DataRetentionInHours": { "base": null, "refs": { - "StreamingConfiguration$DataRetentionInHours": "The retention period for the Amazon Kinesis data, in hours.
" + "StreamingConfiguration$DataRetentionInHours": "The retention period, in hours, for the Amazon Kinesis data.
" } }, "DeleteAccountRequest": { @@ -365,16 +497,36 @@ "refs": { } }, + "DeleteAttendeeRequest": { + "base": null, + "refs": { + } + }, "DeleteEventsConfigurationRequest": { "base": null, "refs": { } }, + "DeleteMeetingRequest": { + "base": null, + "refs": { + } + }, "DeletePhoneNumberRequest": { "base": null, "refs": { } }, + "DeleteRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "DeleteRoomRequest": { + "base": null, + "refs": { + } + }, "DeleteVoiceConnectorGroupRequest": { "base": null, "refs": { @@ -479,6 +631,7 @@ "BadRequestException$Code": null, "ConflictException$Code": null, "ForbiddenException$Code": null, + "MemberError$ErrorCode": "The error code.
", "NotFoundException$Code": null, "PhoneNumberError$ErrorCode": "The error code.
", "ResourceLimitExceededException$Code": null, @@ -497,6 +650,16 @@ "PutEventsConfigurationResponse$EventsConfiguration": null } }, + "ExternalUserIdType": { + "base": null, + "refs": { + "Attendee$ExternalUserId": "The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.
", + "CreateAttendeeError$ExternalUserId": "The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.
", + "CreateAttendeeRequest$ExternalUserId": "The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.
", + "CreateAttendeeRequestItem$ExternalUserId": "The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.
", + "CreateMeetingRequest$MeetingHostId": "Reserved.
" + } + }, "ForbiddenException": { "base": "The client is permanently forbidden from making the request. For example, when a user tries to create an account from an unsupported Region.
", "refs": { @@ -522,6 +685,16 @@ "refs": { } }, + "GetAttendeeRequest": { + "base": null, + "refs": { + } + }, + "GetAttendeeResponse": { + "base": null, + "refs": { + } + }, "GetBotRequest": { "base": null, "refs": { @@ -547,6 +720,16 @@ "refs": { } }, + "GetMeetingRequest": { + "base": null, + "refs": { + } + }, + "GetMeetingResponse": { + "base": null, + "refs": { + } + }, "GetPhoneNumberOrderRequest": { "base": null, "refs": { @@ -572,6 +755,16 @@ "refs": { } }, + "GetRoomRequest": { + "base": null, + "refs": { + } + }, + "GetRoomResponse": { + "base": null, + "refs": { + } + }, "GetUserRequest": { "base": null, "refs": { @@ -665,7 +858,18 @@ "GuidString": { "base": null, "refs": { + "Attendee$AttendeeId": "The Amazon Chime SDK attendee ID.
", + "BatchCreateAttendeeRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", + "CreateAttendeeRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", + "DeleteAttendeeRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", + "DeleteAttendeeRequest$AttendeeId": "The Amazon Chime SDK attendee ID.
", + "DeleteMeetingRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", + "GetAttendeeRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", + "GetAttendeeRequest$AttendeeId": "The Amazon Chime SDK attendee ID.
", + "GetMeetingRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", "GetPhoneNumberOrderRequest$PhoneNumberOrderId": "The ID for the phone number order.
", + "ListAttendeesRequest$MeetingId": "The Amazon Chime SDK meeting ID.
", + "Meeting$MeetingId": "The Amazon Chime SDK meeting ID.
", "PhoneNumberOrder$PhoneNumberOrderId": "The phone number order ID.
" } }, @@ -678,7 +882,7 @@ "InviteList": { "base": null, "refs": { - "InviteUsersResponse$Invites": "The invite details.
" + "InviteUsersResponse$Invites": "The email invitation details.
" } }, "InviteStatus": { @@ -711,6 +915,9 @@ "PhoneNumberAssociation$AssociatedTimestamp": "The timestamp of the phone number association, in ISO 8601 format.
", "PhoneNumberOrder$CreatedTimestamp": "The phone number order creation timestamp, in ISO 8601 format.
", "PhoneNumberOrder$UpdatedTimestamp": "The updated phone number order timestamp, in ISO 8601 format.
", + "Room$CreatedTimestamp": "The room creation timestamp, in ISO 8601 format.
", + "Room$UpdatedTimestamp": "The room update timestamp, in ISO 8601 format.
", + "RoomMembership$UpdatedTimestamp": "The room membership update timestamp, in ISO 8601 format.
", "TerminationHealth$Timestamp": "The timestamp, in ISO 8601 format.
", "User$RegisteredOn": "Date and time when the user is registered, in ISO 8601 format.
", "User$InvitedOn": "Date and time when the user is invited to the Amazon Chime account, in ISO 8601 format.
", @@ -720,6 +927,12 @@ "VoiceConnectorGroup$UpdatedTimestamp": "The updated Amazon Chime Voice Connector group timestamp, in ISO 8601 format.
" } }, + "JoinTokenString": { + "base": null, + "refs": { + "Attendee$JoinToken": "The join token used by the Amazon Chime SDK attendee.
" + } + }, "License": { "base": null, "refs": { @@ -746,6 +959,16 @@ "refs": { } }, + "ListAttendeesRequest": { + "base": null, + "refs": { + } + }, + "ListAttendeesResponse": { + "base": null, + "refs": { + } + }, "ListBotsRequest": { "base": null, "refs": { @@ -756,6 +979,16 @@ "refs": { } }, + "ListMeetingsRequest": { + "base": null, + "refs": { + } + }, + "ListMeetingsResponse": { + "base": null, + "refs": { + } + }, "ListPhoneNumberOrdersRequest": { "base": null, "refs": { @@ -776,6 +1009,26 @@ "refs": { } }, + "ListRoomMembershipsRequest": { + "base": null, + "refs": { + } + }, + "ListRoomMembershipsResponse": { + "base": null, + "refs": { + } + }, + "ListRoomsRequest": { + "base": null, + "refs": { + } + }, + "ListRoomsResponse": { + "base": null, + "refs": { + } + }, "ListUsersRequest": { "base": null, "refs": { @@ -834,19 +1087,92 @@ "refs": { } }, + "MediaPlacement": { + "base": "A set of endpoints used by clients to connect to the media service group for a Amazon Chime SDK meeting.
", + "refs": { + "Meeting$MediaPlacement": "The media placement for the meeting.
" + } + }, + "Meeting": { + "base": "A meeting created using the Amazon Chime SDK.
", + "refs": { + "CreateMeetingResponse$Meeting": "The meeting information, including the meeting ID and MediaPlacement
.
The Amazon Chime SDK meeting information.
", + "MeetingList$member": null + } + }, + "MeetingList": { + "base": null, + "refs": { + "ListMeetingsResponse$Meetings": "The Amazon Chime SDK meeting information.
" + } + }, + "MeetingNotificationConfiguration": { + "base": "The configuration for resource targets to receive notifications when Amazon Chime SDK meeting and attendee events occur.
", + "refs": { + "CreateMeetingRequest$NotificationsConfiguration": "The configuration for resource targets to receive notifications when meeting and attendee events occur.
" + } + }, + "Member": { + "base": "The member details, such as email address, name, member ID, and member type.
", + "refs": { + "RoomMembership$Member": null + } + }, + "MemberError": { + "base": "The list of errors returned when a member action results in an error.
", + "refs": { + "MemberErrorList$member": null + } + }, + "MemberErrorList": { + "base": null, + "refs": { + "BatchCreateRoomMembershipResponse$Errors": "If the action fails for one or more of the member IDs in the request, a list of the member IDs is returned, along with error codes and error messages.
" + } + }, + "MemberType": { + "base": null, + "refs": { + "Member$MemberType": "The member type.
" + } + }, + "MembershipItem": { + "base": "Membership details, such as member ID and member role.
", + "refs": { + "MembershipItemList$member": null + } + }, + "MembershipItemList": { + "base": null, + "refs": { + "BatchCreateRoomMembershipRequest$MembershipItemList": "The list of membership items.
" + } + }, "NonEmptyString": { "base": null, "refs": { "AssociatePhoneNumbersWithVoiceConnectorGroupRequest$VoiceConnectorGroupId": "The Amazon Chime Voice Connector group ID.
", "AssociatePhoneNumbersWithVoiceConnectorRequest$VoiceConnectorId": "The Amazon Chime Voice Connector ID.
", + "BatchCreateRoomMembershipRequest$AccountId": "The Amazon Chime account ID.
", + "BatchCreateRoomMembershipRequest$RoomId": "The room ID.
", "BatchSuspendUserRequest$AccountId": "The Amazon Chime account ID.
", "BatchUnsuspendUserRequest$AccountId": "The Amazon Chime account ID.
", "BatchUpdateUserRequest$AccountId": "The Amazon Chime account ID.
", "CreateBotRequest$AccountId": "The Amazon Chime account ID.
", "CreateBotRequest$Domain": "The domain of the Amazon Chime Enterprise account.
", + "CreateRoomMembershipRequest$AccountId": "The Amazon Chime account ID.
", + "CreateRoomMembershipRequest$RoomId": "The room ID.
", + "CreateRoomMembershipRequest$MemberId": "The Amazon Chime member ID (user ID or bot ID).
", + "CreateRoomRequest$AccountId": "The Amazon Chime account ID.
", "DeleteAccountRequest$AccountId": "The Amazon Chime account ID.
", "DeleteEventsConfigurationRequest$AccountId": "The Amazon Chime account ID.
", "DeleteEventsConfigurationRequest$BotId": "The bot ID.
", + "DeleteRoomMembershipRequest$AccountId": "The Amazon Chime account ID.
", + "DeleteRoomMembershipRequest$RoomId": "The room ID.
", + "DeleteRoomMembershipRequest$MemberId": "The member ID (user ID or bot ID).
", + "DeleteRoomRequest$AccountId": "The Amazon Chime account ID.
", + "DeleteRoomRequest$RoomId": "The chat room ID.
", "DeleteVoiceConnectorGroupRequest$VoiceConnectorGroupId": "The Amazon Chime Voice Connector group ID.
", "DeleteVoiceConnectorOriginationRequest$VoiceConnectorId": "The Amazon Chime Voice Connector ID.
", "DeleteVoiceConnectorRequest$VoiceConnectorId": "The Amazon Chime Voice Connector ID.
", @@ -861,6 +1187,8 @@ "GetBotRequest$BotId": "The bot ID.
", "GetEventsConfigurationRequest$AccountId": "The Amazon Chime account ID.
", "GetEventsConfigurationRequest$BotId": "The bot ID.
", + "GetRoomRequest$AccountId": "The Amazon Chime account ID.
", + "GetRoomRequest$RoomId": "The room ID.
", "GetUserRequest$AccountId": "The Amazon Chime account ID.
", "GetUserRequest$UserId": "The user ID.
", "GetVoiceConnectorGroupRequest$VoiceConnectorGroupId": "The Amazon Chime Voice Connector group ID.
", @@ -872,10 +1200,17 @@ "GetVoiceConnectorTerminationRequest$VoiceConnectorId": "The Amazon Chime Voice Connector ID.
", "InviteUsersRequest$AccountId": "The Amazon Chime account ID.
", "ListBotsRequest$AccountId": "The Amazon Chime account ID.
", + "ListRoomMembershipsRequest$AccountId": "The Amazon Chime account ID.
", + "ListRoomMembershipsRequest$RoomId": "The room ID.
", + "ListRoomsRequest$AccountId": "The Amazon Chime account ID.
", "ListUsersRequest$AccountId": "The Amazon Chime account ID.
", "ListVoiceConnectorTerminationCredentialsRequest$VoiceConnectorId": "The Amazon Chime Voice Connector ID.
", "LogoutUserRequest$AccountId": "The Amazon Chime account ID.
", "LogoutUserRequest$UserId": "The user ID.
", + "Member$MemberId": "The member ID (user ID or bot ID).
", + "Member$AccountId": "The Amazon Chime account ID.
", + "MemberError$MemberId": "The member ID.
", + "MembershipItem$MemberId": "The member ID.
", "PhoneNumberError$PhoneNumberId": "The phone number ID for which the action failed.
", "PutEventsConfigurationRequest$AccountId": "The Amazon Chime account ID.
", "PutEventsConfigurationRequest$BotId": "The bot ID.
", @@ -889,11 +1224,21 @@ "ResetPersonalPINRequest$AccountId": "The Amazon Chime account ID.
", "ResetPersonalPINRequest$UserId": "The user ID.
", "RestorePhoneNumberRequest$PhoneNumberId": "The phone number.
", + "Room$RoomId": "The room ID.
", + "Room$AccountId": "The Amazon Chime account ID.
", + "Room$CreatedBy": "The identifier of the room creator.
", + "RoomMembership$RoomId": "The room ID.
", + "RoomMembership$InvitedBy": "The identifier of the user that invited the room member.
", "UpdateAccountRequest$AccountId": "The Amazon Chime account ID.
", "UpdateAccountSettingsRequest$AccountId": "The Amazon Chime account ID.
", "UpdateBotRequest$AccountId": "The Amazon Chime account ID.
", "UpdateBotRequest$BotId": "The bot ID.
", "UpdatePhoneNumberRequestItem$PhoneNumberId": "The phone number ID to update.
", + "UpdateRoomMembershipRequest$AccountId": "The Amazon Chime account ID.
", + "UpdateRoomMembershipRequest$RoomId": "The room ID.
", + "UpdateRoomMembershipRequest$MemberId": "The member ID.
", + "UpdateRoomRequest$AccountId": "The Amazon Chime account ID.
", + "UpdateRoomRequest$RoomId": "The room ID.
", "UpdateUserRequest$AccountId": "The Amazon Chime account ID.
", "UpdateUserRequest$UserId": "The user ID.
", "UpdateUserRequestItem$UserId": "The user ID.
", @@ -1207,13 +1552,55 @@ "ResultMax": { "base": null, "refs": { - "ListBotsRequest$MaxResults": "The maximum number of results to return in a single call. Default is 10.
", + "ListAttendeesRequest$MaxResults": "The maximum number of results to return in a single call.
", + "ListBotsRequest$MaxResults": "The maximum number of results to return in a single call. The default is 10.
", + "ListMeetingsRequest$MaxResults": "The maximum number of results to return in a single call.
", "ListPhoneNumberOrdersRequest$MaxResults": "The maximum number of results to return in a single call.
", "ListPhoneNumbersRequest$MaxResults": "The maximum number of results to return in a single call.
", + "ListRoomMembershipsRequest$MaxResults": "The maximum number of results to return in a single call.
", + "ListRoomsRequest$MaxResults": "The maximum number of results to return in a single call.
", "ListVoiceConnectorGroupsRequest$MaxResults": "The maximum number of results to return in a single call.
", "ListVoiceConnectorsRequest$MaxResults": "The maximum number of results to return in a single call.
" } }, + "Room": { + "base": "The Amazon Chime chat room details.
", + "refs": { + "CreateRoomResponse$Room": "The room details.
", + "GetRoomResponse$Room": "The room details.
", + "RoomList$member": null, + "UpdateRoomResponse$Room": "The room details.
" + } + }, + "RoomList": { + "base": null, + "refs": { + "ListRoomsResponse$Rooms": "The room details.
" + } + }, + "RoomMembership": { + "base": "The room membership details.
", + "refs": { + "CreateRoomMembershipResponse$RoomMembership": "The room membership details.
", + "RoomMembershipList$member": null, + "UpdateRoomMembershipResponse$RoomMembership": "The room membership details.
" + } + }, + "RoomMembershipList": { + "base": null, + "refs": { + "ListRoomMembershipsResponse$RoomMemberships": "The room membership details.
" + } + }, + "RoomMembershipRole": { + "base": null, + "refs": { + "CreateRoomMembershipRequest$Role": "The role of the member.
", + "MembershipItem$Role": "The member role.
", + "RoomMembership$Role": "The membership role.
", + "UpdateRoomMembershipRequest$Role": "The role of the member.
" + } + }, "SearchAvailablePhoneNumbersRequest": { "base": null, "refs": { @@ -1231,13 +1618,18 @@ "Bot$BotEmail": "The bot email address.
", "Bot$SecurityToken": "The security token used to authenticate Amazon Chime with the outgoing event endpoint.
", "CreateBotRequest$DisplayName": "The bot display name.
", + "CreateRoomRequest$Name": "The room name.
", "Credential$Username": "The RFC2617 compliant user name associated with the SIP credentials, in US-ASCII format.
", "Credential$Password": "The RFC2617 compliant password associated with the SIP credentials, in US-ASCII format.
", "EventsConfiguration$OutboundEventsHTTPSEndpoint": "HTTPS endpoint that allows a bot to receive outgoing events.
", "EventsConfiguration$LambdaFunctionArn": "Lambda function ARN that allows a bot to receive outgoing events.
", + "Member$Email": "The member email address.
", + "Member$FullName": "The member name.
", "PutEventsConfigurationRequest$OutboundEventsHTTPSEndpoint": "HTTPS endpoint that allows the bot to receive outgoing events.
", "PutEventsConfigurationRequest$LambdaFunctionArn": "Lambda function ARN that allows the bot to receive outgoing events.
", + "Room$Name": "The room name.
", "SensitiveStringList$member": null, + "UpdateRoomRequest$Name": "The room name.
", "User$PrimaryProvisionedNumber": "The primary phone number associated with the user.
", "User$DisplayName": "The display name of the user.
" } @@ -1281,6 +1673,9 @@ "Bot$UserId": "The unique ID for the bot user.
", "BusinessCallingSettings$CdrBucket": "The Amazon S3 bucket designated for call detail record storage.
", "ConflictException$Message": null, + "CreateAttendeeError$ErrorCode": "The error code.
", + "CreateAttendeeError$ErrorMessage": "The error message.
", + "CreateMeetingRequest$MediaRegion": "The Region in which to create the meeting. Available values: us-east-1
, us-west-2
.
The phone number ID.
", "DisassociatePhoneNumberFromUserRequest$AccountId": "The Amazon Chime account ID.
", "DisassociatePhoneNumberFromUserRequest$UserId": "The user ID.
", @@ -1292,19 +1687,30 @@ "Invite$InviteId": "The invite ID.
", "ListAccountsRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListAccountsResponse$NextToken": "The token to use to retrieve the next page of results.
", + "ListAttendeesRequest$NextToken": "The token to use to retrieve the next page of results.
", + "ListAttendeesResponse$NextToken": "The token to use to retrieve the next page of results.
", "ListBotsRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListBotsResponse$NextToken": "The token to use to retrieve the next page of results.
", + "ListMeetingsRequest$NextToken": "The token to use to retrieve the next page of results.
", + "ListMeetingsResponse$NextToken": "The token to use to retrieve the next page of results.
", "ListPhoneNumberOrdersRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListPhoneNumberOrdersResponse$NextToken": "The token to use to retrieve the next page of results.
", "ListPhoneNumbersRequest$FilterValue": "The value to use for the filter.
", "ListPhoneNumbersRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListPhoneNumbersResponse$NextToken": "The token to use to retrieve the next page of results.
", + "ListRoomMembershipsRequest$NextToken": "The token to use to retrieve the next page of results.
", + "ListRoomMembershipsResponse$NextToken": "The token to use to retrieve the next page of results.
", + "ListRoomsRequest$MemberId": "The member ID (user ID or bot ID).
", + "ListRoomsRequest$NextToken": "The token to use to retrieve the next page of results.
", + "ListRoomsResponse$NextToken": "The token to use to retrieve the next page of results.
", "ListUsersRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListUsersResponse$NextToken": "The token to use to retrieve the next page of results.
", "ListVoiceConnectorGroupsRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListVoiceConnectorGroupsResponse$NextToken": "The token to use to retrieve the next page of results.
", "ListVoiceConnectorsRequest$NextToken": "The token to use to retrieve the next page of results.
", "ListVoiceConnectorsResponse$NextToken": "The token to use to retrieve the next page of results.
", + "Meeting$MediaRegion": "The Region in which to create the meeting. Available values: us-east-1
, us-west-2
.
The error message.
", "NonEmptyStringList$member": null, "NotFoundException$Message": null, "OriginationRoute$Host": "The FQDN or IP address to contact for origination traffic.
", @@ -1444,6 +1850,26 @@ "refs": { } }, + "UpdateRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "UpdateRoomMembershipResponse": { + "base": null, + "refs": { + } + }, + "UpdateRoomRequest": { + "base": null, + "refs": { + } + }, + "UpdateRoomResponse": { + "base": null, + "refs": { + } + }, "UpdateUserRequest": { "base": null, "refs": { @@ -1491,6 +1917,17 @@ "refs": { } }, + "UriType": { + "base": null, + "refs": { + "MediaPlacement$AudioHostUrl": "The audio host URL.
", + "MediaPlacement$ScreenDataUrl": "The screen data URL.
", + "MediaPlacement$ScreenSharingUrl": "The screen sharing URL.
", + "MediaPlacement$ScreenViewingUrl": "The screen viewing URL.
", + "MediaPlacement$SignalingUrl": "The signaling URL.
", + "MediaPlacement$TurnControlUrl": "The turn control URL.
" + } + }, "User": { "base": "The user on the Amazon Chime account.
", "refs": { @@ -1503,7 +1940,7 @@ "UserEmailList": { "base": null, "refs": { - "InviteUsersRequest$UserEmailList": "The user email addresses to which to send the invite.
" + "InviteUsersRequest$UserEmailList": "The user email addresses to which to send the email invitation.
" } }, "UserError": { diff --git a/models/apis/chime/2018-05-01/paginators-1.json b/models/apis/chime/2018-05-01/paginators-1.json index 1a3a06e0cfe..7d55169a037 100644 --- a/models/apis/chime/2018-05-01/paginators-1.json +++ b/models/apis/chime/2018-05-01/paginators-1.json @@ -5,11 +5,21 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListAttendees": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListBots": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListMeetings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListPhoneNumberOrders": { "input_token": "NextToken", "output_token": "NextToken", @@ -20,6 +30,16 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListRoomMemberships": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRooms": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListUsers": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/cloudformation/2010-05-15/api-2.json b/models/apis/cloudformation/2010-05-15/api-2.json index 0e71f2be72f..f6c8345483a 100644 --- a/models/apis/cloudformation/2010-05-15/api-2.json +++ b/models/apis/cloudformation/2010-05-15/api-2.json @@ -170,6 +170,23 @@ {"shape":"OperationInProgressException"} ] }, + "DeregisterType":{ + "name":"DeregisterType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterTypeInput"}, + "output":{ + "shape":"DeregisterTypeOutput", + "resultWrapper":"DeregisterTypeResult" + }, + "errors":[ + {"shape":"CFNRegistryException"}, + {"shape":"TypeNotFoundException"} + ], + "idempotent":true + }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", "http":{ @@ -316,6 +333,39 @@ "resultWrapper":"DescribeStacksResult" } }, + "DescribeType":{ + 
"name":"DescribeType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTypeInput"}, + "output":{ + "shape":"DescribeTypeOutput", + "resultWrapper":"DescribeTypeResult" + }, + "errors":[ + {"shape":"CFNRegistryException"}, + {"shape":"TypeNotFoundException"} + ], + "idempotent":true + }, + "DescribeTypeRegistration":{ + "name":"DescribeTypeRegistration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTypeRegistrationInput"}, + "output":{ + "shape":"DescribeTypeRegistrationOutput", + "resultWrapper":"DescribeTypeRegistrationResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, "DetectStackDrift":{ "name":"DetectStackDrift", "http":{ @@ -340,6 +390,23 @@ "resultWrapper":"DetectStackResourceDriftResult" } }, + "DetectStackSetDrift":{ + "name":"DetectStackSetDrift", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetectStackSetDriftInput"}, + "output":{ + "shape":"DetectStackSetDriftOutput", + "resultWrapper":"DetectStackSetDriftResult" + }, + "errors":[ + {"shape":"InvalidOperationException"}, + {"shape":"OperationInProgressException"}, + {"shape":"StackSetNotFoundException"} + ] + }, "EstimateTemplateCost":{ "name":"EstimateTemplateCost", "http":{ @@ -530,6 +597,87 @@ "resultWrapper":"ListStacksResult" } }, + "ListTypeRegistrations":{ + "name":"ListTypeRegistrations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTypeRegistrationsInput"}, + "output":{ + "shape":"ListTypeRegistrationsOutput", + "resultWrapper":"ListTypeRegistrationsResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, + "ListTypeVersions":{ + "name":"ListTypeVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTypeVersionsInput"}, + "output":{ + "shape":"ListTypeVersionsOutput", + "resultWrapper":"ListTypeVersionsResult" + }, + "errors":[ + 
{"shape":"CFNRegistryException"} + ], + "idempotent":true + }, + "ListTypes":{ + "name":"ListTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTypesInput"}, + "output":{ + "shape":"ListTypesOutput", + "resultWrapper":"ListTypesResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, + "RecordHandlerProgress":{ + "name":"RecordHandlerProgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RecordHandlerProgressInput"}, + "output":{ + "shape":"RecordHandlerProgressOutput", + "resultWrapper":"RecordHandlerProgressResult" + }, + "errors":[ + {"shape":"InvalidStateTransitionException"}, + {"shape":"OperationStatusCheckFailedException"} + ], + "idempotent":true + }, + "RegisterType":{ + "name":"RegisterType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterTypeInput"}, + "output":{ + "shape":"RegisterTypeOutput", + "resultWrapper":"RegisterTypeResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, "SetStackPolicy":{ "name":"SetStackPolicy", "http":{ @@ -538,6 +686,23 @@ }, "input":{"shape":"SetStackPolicyInput"} }, + "SetTypeDefaultVersion":{ + "name":"SetTypeDefaultVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTypeDefaultVersionInput"}, + "output":{ + "shape":"SetTypeDefaultVersionOutput", + "resultWrapper":"SetTypeDefaultVersionResult" + }, + "errors":[ + {"shape":"CFNRegistryException"}, + {"shape":"TypeNotFoundException"} + ], + "idempotent":true + }, "SignalResource":{ "name":"SignalResource", "http":{ @@ -707,6 +872,18 @@ "max":100, "min":1 }, + "CFNRegistryException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"CFNRegistryException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "CancelUpdateStackInput":{ "type":"structure", "required":["StackName"], @@ -1039,6 +1216,27 @@ } 
}, "DeletionTime":{"type":"timestamp"}, + "DeprecatedStatus":{ + "type":"string", + "enum":[ + "LIVE", + "DEPRECATED" + ] + }, + "DeregisterTypeInput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"PrivateTypeArn"}, + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "VersionId":{"shape":"TypeVersionId"} + } + }, + "DeregisterTypeOutput":{ + "type":"structure", + "members":{ + } + }, "DescribeAccountLimitsInput":{ "type":"structure", "members":{ @@ -1233,6 +1431,51 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeTypeInput":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "Arn":{"shape":"TypeArn"}, + "VersionId":{"shape":"TypeVersionId"} + } + }, + "DescribeTypeOutput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"TypeArn"}, + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "DefaultVersionId":{"shape":"TypeVersionId"}, + "Description":{"shape":"Description"}, + "Schema":{"shape":"TypeSchema"}, + "ProvisioningType":{"shape":"ProvisioningType"}, + "DeprecatedStatus":{"shape":"DeprecatedStatus"}, + "LoggingConfig":{"shape":"LoggingConfig"}, + "ExecutionRoleArn":{"shape":"RoleArn"}, + "Visibility":{"shape":"Visibility"}, + "SourceUrl":{"shape":"OptionalSecureUrl"}, + "DocumentationUrl":{"shape":"OptionalSecureUrl"}, + "LastUpdated":{"shape":"Timestamp"}, + "TimeCreated":{"shape":"Timestamp"} + } + }, + "DescribeTypeRegistrationInput":{ + "type":"structure", + "required":["RegistrationToken"], + "members":{ + "RegistrationToken":{"shape":"RegistrationToken"} + } + }, + "DescribeTypeRegistrationOutput":{ + "type":"structure", + "members":{ + "ProgressStatus":{"shape":"RegistrationStatus"}, + "Description":{"shape":"Description"}, + "TypeArn":{"shape":"TypeArn"}, + "TypeVersionArn":{"shape":"TypeArn"} + } + }, "Description":{ "type":"string", "max":1024, @@ -1271,6 +1514,24 @@ "StackResourceDrift":{"shape":"StackResourceDrift"} } }, + 
"DetectStackSetDriftInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetNameOrId"}, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationId":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "DetectStackSetDriftOutput":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"} + } + }, "DifferenceType":{ "type":"string", "enum":[ @@ -1280,7 +1541,16 @@ ] }, "DisableRollback":{"type":"boolean"}, + "DriftedStackInstancesCount":{ + "type":"integer", + "min":0 + }, "EnableTerminationProtection":{"type":"boolean"}, + "ErrorMessage":{ + "type":"string", + "max":255, + "min":1 + }, "EstimateTemplateCostInput":{ "type":"structure", "members":{ @@ -1348,6 +1618,10 @@ "type":"list", "member":{"shape":"Export"} }, + "FailedStackInstancesCount":{ + "type":"integer", + "min":0 + }, "FailureToleranceCount":{ "type":"integer", "min":0 @@ -1408,10 +1682,37 @@ "ResourceIdentifierSummaries":{"shape":"ResourceIdentifierSummaries"} } }, + "HandlerErrorCode":{ + "type":"string", + "enum":[ + "NotUpdatable", + "InvalidRequest", + "AccessDenied", + "InvalidCredentials", + "AlreadyExists", + "NotFound", + "ResourceConflict", + "Throttling", + "ServiceLimitExceeded", + "NotStabilized", + "GeneralServiceException", + "ServiceInternalError", + "NetworkFailure", + "InternalFailure" + ] + }, "Imports":{ "type":"list", "member":{"shape":"StackName"} }, + "InProgressStackInstancesCount":{ + "type":"integer", + "min":0 + }, + "InSyncStackInstancesCount":{ + "type":"integer", + "min":0 + }, "InsufficientCapabilitiesException":{ "type":"structure", "members":{ @@ -1445,6 +1746,17 @@ }, "exception":true }, + "InvalidStateTransitionException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidStateTransition", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "Key":{"type":"string"}, "LastUpdatedTime":{"type":"timestamp"}, 
"LimitExceededException":{ @@ -1601,6 +1913,76 @@ "NextToken":{"shape":"NextToken"} } }, + "ListTypeRegistrationsInput":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "TypeArn":{"shape":"TypeArn"}, + "RegistrationStatusFilter":{"shape":"RegistrationStatus"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypeRegistrationsOutput":{ + "type":"structure", + "members":{ + "RegistrationTokenList":{"shape":"RegistrationTokenList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypeVersionsInput":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "Arn":{"shape":"PrivateTypeArn"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"}, + "DeprecatedStatus":{"shape":"DeprecatedStatus"} + } + }, + "ListTypeVersionsOutput":{ + "type":"structure", + "members":{ + "TypeVersionSummaries":{"shape":"TypeVersionSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypesInput":{ + "type":"structure", + "members":{ + "Visibility":{"shape":"Visibility"}, + "ProvisioningType":{"shape":"ProvisioningType"}, + "DeprecatedStatus":{"shape":"DeprecatedStatus"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypesOutput":{ + "type":"structure", + "members":{ + "TypeSummaries":{"shape":"TypeSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "LogRoleArn", + "LogGroupName" + ], + "members":{ + "LogRoleArn":{"shape":"RoleArn"}, + "LogGroupName":{"shape":"LogGroupName"} + } + }, "LogicalResourceId":{"type":"string"}, "LogicalResourceIds":{ "type":"list", @@ -1692,6 +2074,30 @@ }, "exception":true }, + "OperationStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + 
"SUCCESS", + "FAILED" + ] + }, + "OperationStatusCheckFailedException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ConditionalCheckFailed", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionalSecureUrl":{ + "type":"string", + "max":4096 + }, "Output":{ "type":"structure", "members":{ @@ -1761,6 +2167,11 @@ "Value":{"shape":"Value"} } }, + "PrivateTypeArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:[0-9]{12}:type/.+" + }, "Properties":{"type":"string"}, "PropertyDifference":{ "type":"structure", @@ -1784,12 +2195,84 @@ "PropertyName":{"type":"string"}, "PropertyPath":{"type":"string"}, "PropertyValue":{"type":"string"}, + "ProvisioningType":{ + "type":"string", + "enum":[ + "NON_PROVISIONABLE", + "IMMUTABLE", + "FULLY_MUTABLE" + ] + }, "Reason":{"type":"string"}, + "RecordHandlerProgressInput":{ + "type":"structure", + "required":[ + "BearerToken", + "OperationStatus" + ], + "members":{ + "BearerToken":{"shape":"ClientToken"}, + "OperationStatus":{"shape":"OperationStatus"}, + "CurrentOperationStatus":{"shape":"OperationStatus"}, + "StatusMessage":{"shape":"StatusMessage"}, + "ErrorCode":{"shape":"HandlerErrorCode"}, + "ResourceModel":{"shape":"ResourceModel"}, + "ClientRequestToken":{"shape":"ClientRequestToken"} + } + }, + "RecordHandlerProgressOutput":{ + "type":"structure", + "members":{ + } + }, "Region":{"type":"string"}, "RegionList":{ "type":"list", "member":{"shape":"Region"} }, + "RegisterTypeInput":{ + "type":"structure", + "required":[ + "TypeName", + "SchemaHandlerPackage" + ], + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "SchemaHandlerPackage":{"shape":"S3Url"}, + "LoggingConfig":{"shape":"LoggingConfig"}, + "ExecutionRoleArn":{"shape":"RoleArn"}, + "ClientRequestToken":{"shape":"RequestToken"} + } + }, + "RegisterTypeOutput":{ + "type":"structure", + "members":{ + 
"RegistrationToken":{"shape":"RegistrationToken"} + } + }, + "RegistrationStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "IN_PROGRESS", + "FAILED" + ] + }, + "RegistrationToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][-a-zA-Z0-9]*" + }, + "RegistrationTokenList":{ + "type":"list", + "member":{"shape":"RegistrationToken"} + }, + "RegistryType":{ + "type":"string", + "enum":["RESOURCE"] + }, "Replacement":{ "type":"string", "enum":[ @@ -1798,6 +2281,12 @@ "Conditional" ] }, + "RequestToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][-a-zA-Z0-9]*" + }, "RequiresRecreation":{ "type":"string", "enum":[ @@ -1875,6 +2364,11 @@ "type":"list", "member":{"shape":"ResourceIdentifierPropertyKey"} }, + "ResourceModel":{ + "type":"string", + "max":16384, + "min":1 + }, "ResourceProperties":{"type":"string"}, "ResourceSignalStatus":{ "type":"string", @@ -1964,6 +2458,12 @@ "max":2048, "min":20 }, + "RoleArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"arn:.+:iam::[0-9]{12}:role/.+" + }, "RollbackConfiguration":{ "type":"structure", "members":{ @@ -1987,6 +2487,11 @@ "member":{"shape":"RollbackTrigger"}, "max":5 }, + "S3Url":{ + "type":"string", + "max":4096, + "min":1 + }, "Scope":{ "type":"list", "member":{"shape":"ResourceAttribute"} @@ -2000,6 +2505,20 @@ "StackPolicyURL":{"shape":"StackPolicyURL"} } }, + "SetTypeDefaultVersionInput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"PrivateTypeArn"}, + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "VersionId":{"shape":"TypeVersionId"} + } + }, + "SetTypeDefaultVersionOutput":{ + "type":"structure", + "members":{ + } + }, "SignalResourceInput":{ "type":"structure", "required":[ @@ -2122,7 +2641,9 @@ "StackId":{"shape":"StackId"}, "ParameterOverrides":{"shape":"Parameters"}, "Status":{"shape":"StackInstanceStatus"}, - "StatusReason":{"shape":"Reason"} + "StatusReason":{"shape":"Reason"}, + 
"DriftStatus":{"shape":"StackDriftStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, "StackInstanceNotFoundException":{ @@ -2156,7 +2677,9 @@ "Account":{"shape":"Account"}, "StackId":{"shape":"StackId"}, "Status":{"shape":"StackInstanceStatus"}, - "StatusReason":{"shape":"Reason"} + "StatusReason":{"shape":"Reason"}, + "DriftStatus":{"shape":"StackDriftStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, "StackName":{"type":"string"}, @@ -2324,10 +2847,42 @@ "Tags":{"shape":"Tags"}, "StackSetARN":{"shape":"StackSetARN"}, "AdministrationRoleARN":{"shape":"RoleARN"}, - "ExecutionRoleName":{"shape":"ExecutionRoleName"} + "ExecutionRoleName":{"shape":"ExecutionRoleName"}, + "StackSetDriftDetectionDetails":{"shape":"StackSetDriftDetectionDetails"} } }, "StackSetARN":{"type":"string"}, + "StackSetDriftDetectionDetails":{ + "type":"structure", + "members":{ + "DriftStatus":{"shape":"StackSetDriftStatus"}, + "DriftDetectionStatus":{"shape":"StackSetDriftDetectionStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"}, + "TotalStackInstancesCount":{"shape":"TotalStackInstancesCount"}, + "DriftedStackInstancesCount":{"shape":"DriftedStackInstancesCount"}, + "InSyncStackInstancesCount":{"shape":"InSyncStackInstancesCount"}, + "InProgressStackInstancesCount":{"shape":"InProgressStackInstancesCount"}, + "FailedStackInstancesCount":{"shape":"FailedStackInstancesCount"} + } + }, + "StackSetDriftDetectionStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "FAILED", + "PARTIAL_SUCCESS", + "IN_PROGRESS", + "STOPPED" + ] + }, + "StackSetDriftStatus":{ + "type":"string", + "enum":[ + "DRIFTED", + "IN_SYNC", + "NOT_CHECKED" + ] + }, "StackSetId":{"type":"string"}, "StackSetName":{"type":"string"}, "StackSetNameOrId":{ @@ -2368,7 +2923,8 @@ "AdministrationRoleARN":{"shape":"RoleARN"}, "ExecutionRoleName":{"shape":"ExecutionRoleName"}, "CreationTimestamp":{"shape":"Timestamp"}, - "EndTimestamp":{"shape":"Timestamp"} + 
"EndTimestamp":{"shape":"Timestamp"}, + "StackSetDriftDetectionDetails":{"shape":"StackSetDriftDetectionDetails"} } }, "StackSetOperationAction":{ @@ -2376,7 +2932,8 @@ "enum":[ "CREATE", "UPDATE", - "DELETE" + "DELETE", + "DETECT_DRIFT" ] }, "StackSetOperationPreferences":{ @@ -2454,7 +3011,9 @@ "StackSetName":{"shape":"StackSetName"}, "StackSetId":{"shape":"StackSetId"}, "Description":{"shape":"Description"}, - "Status":{"shape":"StackSetStatus"} + "Status":{"shape":"StackSetStatus"}, + "DriftStatus":{"shape":"StackDriftStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, "StackStatus":{ @@ -2533,6 +3092,10 @@ }, "exception":true }, + "StatusMessage":{ + "type":"string", + "max":1024 + }, "StopStackSetOperationInput":{ "type":"structure", "required":[ @@ -2621,12 +3184,79 @@ }, "exception":true }, + "TotalStackInstancesCount":{ + "type":"integer", + "min":0 + }, "TransformName":{"type":"string"}, "TransformsList":{ "type":"list", "member":{"shape":"TransformName"} }, "Type":{"type":"string"}, + "TypeArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:([0-9]{12})?:type/.+" + }, + "TypeName":{ + "type":"string", + "max":196, + "min":10, + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + }, + "TypeNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TypeNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "TypeSchema":{ + "type":"string", + "max":16777216, + "min":1 + }, + "TypeSummaries":{ + "type":"list", + "member":{"shape":"TypeSummary"} + }, + "TypeSummary":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "DefaultVersionId":{"shape":"TypeVersionId"}, + "TypeArn":{"shape":"TypeArn"}, + "LastUpdated":{"shape":"Timestamp"}, + "Description":{"shape":"Description"} + } + }, + "TypeVersionId":{ + "type":"string", + "max":128, + "min":1, + 
"pattern":"[A-Za-z0-9-]+" + }, + "TypeVersionSummaries":{ + "type":"list", + "member":{"shape":"TypeVersionSummary"} + }, + "TypeVersionSummary":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "VersionId":{"shape":"TypeVersionId"}, + "Arn":{"shape":"TypeArn"}, + "TimeCreated":{"shape":"Timestamp"}, + "Description":{"shape":"Description"} + } + }, "UpdateStackInput":{ "type":"structure", "required":["StackName"], @@ -2747,6 +3377,13 @@ } }, "Value":{"type":"string"}, - "Version":{"type":"string"} + "Version":{"type":"string"}, + "Visibility":{ + "type":"string", + "enum":[ + "PUBLIC", + "PRIVATE" + ] + } } } diff --git a/models/apis/cloudformation/2010-05-15/docs-2.json b/models/apis/cloudformation/2010-05-15/docs-2.json index 89363b0f2e6..77cf095dffa 100644 --- a/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/models/apis/cloudformation/2010-05-15/docs-2.json @@ -12,6 +12,7 @@ "DeleteStack": "Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.
", "DeleteStackInstances": "Deletes stack instances for the specified accounts, in the specified regions.
", "DeleteStackSet": "Deletes a stack set. Before you can delete a stack set, all of its member stack instances must be deleted. For more information about how to do this, see DeleteStackInstances.
", + "DeregisterType": "Removes a type or type version from active use in the CloudFormation registry. If a type or type version is deregistered, it cannot be used in CloudFormation operations.
To deregister a type, you must individually deregister all registered versions of that type. If a type has only a single registered version, deregistering that version results in the type itself being deregistered.
You cannot deregister the default version of a type, unless it is the only registered version of that type, in which case the type itself is deregistered as well.
", "DescribeAccountLimits": "Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see AWS CloudFormation Limits in the AWS CloudFormation User Guide.
", "DescribeChangeSet": "Returns the inputs for the change set and a list of changes that AWS CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the AWS CloudFormation User Guide.
", "DescribeStackDriftDetectionStatus": "Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information on stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.
Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift
returns a StackDriftDetectionId
you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus
. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.
Returns the description of the specified stack set.
", "DescribeStackSetOperation": "Returns the description of the specified stack set operation.
", "DescribeStacks": "Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created.
If the stack does not exist, an AmazonCloudFormationException
is returned.
Returns detailed information about a type that has been registered.
If you specify a VersionId
, DescribeType
returns information about that specific type version. Otherwise, it returns information about the default type version.
Returns information about a type's registration, including its current status and type and version identifiers.
When you initiate a registration request using RegisterType
, you can then use DescribeTypeRegistration
to monitor the progress of that registration request.
Once the registration request has completed, use DescribeType
to return detailed information about a type.
Detects whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, AWS CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
Use DetectStackDrift
to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.
For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection.
DetectStackDrift
can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.
When detecting drift on a stack, AWS CloudFormation does not detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift
directly on the nested stack itself.
Returns information about whether a resource's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which AWS CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.
Use DetectStackResourceDrift
to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.
Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.
", + "DetectStackSetDrift": "Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation Performs Drift Detection on a Stack Set.
DetectStackSetDrift
returns the OperationId
of the stack set drift detection operation. Use this operation ID with DescribeStackSetOperation
to monitor the progress of the drift detection operation. The drift detection operation may take some time, depending on the number of stack instances included in the stack set, as well as the number of resources included in each stack.
Once the operation has completed, use the following actions to return drift information:
Use DescribeStackSet
to return detailed information about the stack set, including detailed information about the last completed drift operation performed on the stack set. (Information about drift operations that are in progress is not included.)
Use ListStackInstances
to return a list of stack instances belonging to the stack set, including the drift status and last drift time checked of each instance.
Use DescribeStackInstance
to return detailed information about a specific stack instance, including its drift status and last drift time checked.
For more information on performing a drift detection operation on a stack set, see Detecting Unmanaged Changes in Stack Sets.
You can only run a single drift detection operation on a given stack set at one time.
To stop a drift detection stack set operation, use StopStackSetOperation
.
Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.
", "ExecuteChangeSet": "Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.
When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.
If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.
", "GetStackPolicy": "Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null value is returned.
", @@ -39,7 +43,13 @@ "ListStackSetOperations": "Returns summary information about operations performed on a stack set.
", "ListStackSets": "Returns summary information about stack sets that are associated with the user.
", "ListStacks": "Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days after the stack is deleted. If no StackStatusFilter is specified, summary information for all stacks is returned (including existing stacks and stacks that have been deleted).
", + "ListTypeRegistrations": "Returns a list of registration tokens for the specified type.
", + "ListTypeVersions": "Returns summary information about the versions of a type.
", + "ListTypes": "Returns summary information about types that have been registered with CloudFormation.
", + "RecordHandlerProgress": "Reports progress of a resource handler to CloudFormation.
Reserved for use by the CloudFormation CLI. Do not use this API in your code.
", + "RegisterType": "Registers a type with the CloudFormation service. Registering a type makes it available for use in CloudFormation templates in your AWS account, and includes:
Validating the resource schema
Determining which handlers have been specified for the resource
Making the resource type available for use in your account
For more information on how to develop types and ready them for registeration, see Creating Resource Providers in the CloudFormation CLI User Guide.
Once you have initiated a registration request using RegisterType
, you can use DescribeTypeRegistration
to monitor the progress of the registration request.
Sets a stack policy for a specified stack.
", + "SetTypeDefaultVersion": "Specify the default version of a type. The default version of a type will be used in CloudFormation operations.
", "SignalResource": "Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.
", "StopStackSetOperation": "Stops an in-progress operation on a stack set and its associated stack instances.
", "UpdateStack": "Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the DescribeStacks action.
To get a copy of the template for an existing stack, you can use the GetTemplate action.
For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.
", @@ -134,6 +144,11 @@ "DescribeStackResourceDriftsInput$MaxResults": "The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
An error occurred during a CloudFormation registry operation.
", + "refs": { + } + }, "CancelUpdateStackInput": { "base": "The input for the CancelUpdateStack action.
", "refs": { @@ -279,8 +294,11 @@ "DeleteStackInstancesInput$OperationId": "The unique identifier for this stack set operation.
If you don't specify an operation ID, the SDK generates one automatically.
The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.
Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED
.
The unique identifier for this stack set operation.
", "DescribeStackSetOperationInput$OperationId": "The unique ID of the stack set operation.
", + "DetectStackSetDriftInput$OperationId": "The ID of the stack set operation.
", + "DetectStackSetDriftOutput$OperationId": "The ID of the drift detection stack set operation.
you can use this operation id with DescribeStackSetOperation
to monitor the progress of the drift detection operation.
A unique identifier for this ExecuteChangeSet
request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet
requests to ensure that AWS CloudFormation successfully received them.
The ID of the stack set operation.
", + "RecordHandlerProgressInput$ClientRequestToken": "Reserved for use by the CloudFormation CLI.
", "StackEvent$ClientRequestToken": "The token passed to the operation that generated this event.
All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack
operation with the token token1
, then all the StackEvents
generated by that operation will have ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
The unique ID of a stack set operation.
", "StackSetOperationSummary$OperationId": "The unique ID of the stack set operation.
", @@ -295,7 +313,8 @@ "ClientToken": { "base": null, "refs": { - "CreateChangeSetInput$ClientToken": "A unique identifier for this CreateChangeSet
request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet
requests to ensure that AWS CloudFormation successfully received them.
A unique identifier for this CreateChangeSet
request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet
requests to ensure that AWS CloudFormation successfully received them.
Reserved for use by the CloudFormation CLI.
" } }, "ContinueUpdateRollbackInput": { @@ -404,6 +423,24 @@ "StackSummary$DeletionTime": "The time the stack was deleted.
" } }, + "DeprecatedStatus": { + "base": null, + "refs": { + "DescribeTypeOutput$DeprecatedStatus": "The deprecation status of the type.
Valid values include:
LIVE
: The type is registered and can be used in CloudFormation operations, dependent on its provisioning behavior and visibility scope.
DEPRECATED
: The type has been deregistered and can no longer be used in CloudFormation operations.
The deprecation status of the type versions that you want to get summary information about.
Valid values include:
LIVE
: The type version is registered and can be used in CloudFormation operations, dependent on its provisioning behavior and visibility scope.
DEPRECATED
: The type version has been deregistered and can no longer be used in CloudFormation operations.
The deprecation status of the types that you want to get summary information about.
Valid values include:
LIVE
: The type is registered for use in CloudFormation operations.
DEPRECATED
: The type has been deregistered and can no longer be used in CloudFormation operations.
The input for the DescribeAccountLimits action.
", "refs": { @@ -514,6 +551,26 @@ "refs": { } }, + "DescribeTypeInput": { + "base": null, + "refs": { + } + }, + "DescribeTypeOutput": { + "base": null, + "refs": { + } + }, + "DescribeTypeRegistrationInput": { + "base": null, + "refs": { + } + }, + "DescribeTypeRegistrationOutput": { + "base": null, + "refs": { + } + }, "Description": { "base": null, "refs": { @@ -521,6 +578,8 @@ "CreateChangeSetInput$Description": "A description to help you identify this change set.
", "CreateStackSetInput$Description": "A description of the stack set. You can use the description to identify the stack set's purpose or other important information.
", "DescribeChangeSetOutput$Description": "Information about the change set.
", + "DescribeTypeOutput$Description": "The description of the registered type.
", + "DescribeTypeRegistrationOutput$Description": "The description of the type registration request.
", "GetTemplateSummaryOutput$Description": "The value that is defined in the Description
property of the template.
User defined description associated with the output.
", "ParameterDeclaration$Description": "The description that is associate with the parameter.
", @@ -530,6 +589,8 @@ "StackSet$Description": "A description of the stack set that you specify when the stack set is created or updated.
", "StackSetSummary$Description": "A description of the stack set that you specify when the stack set is created or updated.
", "TemplateParameter$Description": "User defined description associated with the parameter.
", + "TypeSummary$Description": "The description of the type.
", + "TypeVersionSummary$Description": "The description of the type version.
", "UpdateStackSetInput$Description": "A brief description of updates that you are making.
", "ValidateTemplateOutput$Description": "The description found within the template.
" } @@ -554,6 +615,16 @@ "refs": { } }, + "DetectStackSetDriftInput": { + "base": null, + "refs": { + } + }, + "DetectStackSetDriftOutput": { + "base": null, + "refs": { + } + }, "DifferenceType": { "base": null, "refs": { @@ -567,6 +638,12 @@ "Stack$DisableRollback": "Boolean to enable or disable rollback on stack creation failures:
true
: disable rollback
false
: enable rollback
The number of stack instances that have drifted from the expected template and parameter configuration of the stack set. A stack instance is considered to have drifted if one or more of the resources in the associated stack do not match their expected configuration.
" + } + }, "EnableTerminationProtection": { "base": null, "refs": { @@ -575,6 +652,12 @@ "UpdateTerminationProtectionInput$EnableTerminationProtection": "Whether to enable termination protection on the specified stack.
" } }, + "ErrorMessage": { + "base": null, + "refs": { + "CFNRegistryException$Message": null + } + }, "EstimateTemplateCostInput": { "base": "The input for an EstimateTemplateCost action.
", "refs": { @@ -649,6 +732,12 @@ "ListExportsOutput$Exports": "The output for the ListExports action.
" } }, + "FailedStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$FailedStackInstancesCount": "The number of stack instances for which the drift detection operation failed.
" + } + }, "FailureToleranceCount": { "base": null, "refs": { @@ -691,12 +780,30 @@ "refs": { } }, + "HandlerErrorCode": { + "base": null, + "refs": { + "RecordHandlerProgressInput$ErrorCode": "Reserved for use by the CloudFormation CLI.
" + } + }, "Imports": { "base": null, "refs": { "ListImportsOutput$Imports": "A list of stack names that are importing the specified exported output value.
" } }, + "InProgressStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$InProgressStackInstancesCount": "The number of stack instances that are currently being checked for drift.
" + } + }, + "InSyncStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$InSyncStackInstancesCount": "The number of stack instances which match the expected template and parameter configuration of the stack set.
" + } + }, "InsufficientCapabilitiesException": { "base": "The template contains resources with capabilities that weren't specified in the Capabilities parameter.
", "refs": { @@ -712,6 +819,11 @@ "refs": { } }, + "InvalidStateTransitionException": { + "base": "Error reserved for use by the CloudFormation CLI. CloudFormation does not return this error to users.
", + "refs": { + } + }, "Key": { "base": null, "refs": { @@ -832,6 +944,49 @@ "refs": { } }, + "ListTypeRegistrationsInput": { + "base": null, + "refs": { + } + }, + "ListTypeRegistrationsOutput": { + "base": null, + "refs": { + } + }, + "ListTypeVersionsInput": { + "base": null, + "refs": { + } + }, + "ListTypeVersionsOutput": { + "base": null, + "refs": { + } + }, + "ListTypesInput": { + "base": null, + "refs": { + } + }, + "ListTypesOutput": { + "base": null, + "refs": { + } + }, + "LogGroupName": { + "base": null, + "refs": { + "LoggingConfig$LogGroupName": "The Amazon CloudWatch log group to which CloudFormation sends error logging information when invoking the type's handlers.
" + } + }, + "LoggingConfig": { + "base": "Contains logging configuration information for a type.
", + "refs": { + "DescribeTypeOutput$LoggingConfig": "Contains logging configuration information for a type.
", + "RegisterTypeInput$LoggingConfig": "Specifies logging configuration information for a type.
" + } + }, "LogicalResourceId": { "base": null, "refs": { @@ -875,7 +1030,10 @@ "ListStackInstancesInput$MaxResults": "The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call ListStackSets
again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
If the request doesn't return all of the remaining results, NextToken
is set to a token. To retrieve the next set of results, call ListStackInstances
again and assign that token to the request object's NextToken
parameter. If the request returns all results, NextToken
is set to null
.
A string that identifies the next page of stacks that you want to retrieve.
", - "ListStacksOutput$NextToken": "If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.
" + "ListStacksOutput$NextToken": "If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.
", + "ListTypeRegistrationsInput$NextToken": "If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
If the request doesn't return all of the remaining results, NextToken
is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If the request returns all results, NextToken
is set to null
.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
If the request doesn't return all of the remaining results, NextToken
is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If the request returns all results, NextToken
is set to null
.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
If the request doesn't return all of the remaining results, NextToken
is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If the request returns all results, NextToken
is set to null
.
Reserved for use by the CloudFormation CLI.
", + "RecordHandlerProgressInput$CurrentOperationStatus": "Reserved for use by the CloudFormation CLI.
" + } + }, + "OperationStatusCheckFailedException": { + "base": "Error reserved for use by the CloudFormation CLI. CloudFormation does not return this error to users.
", + "refs": { + } + }, + "OptionalSecureUrl": { + "base": null, + "refs": { + "DescribeTypeOutput$SourceUrl": "The URL of the source code for the type.
", + "DescribeTypeOutput$DocumentationUrl": "The URL of a page providing detailed documentation for this type.
" + } + }, "Output": { "base": "The Output data type.
", "refs": { @@ -1085,6 +1268,14 @@ "PhysicalResourceIdContext$member": null } }, + "PrivateTypeArn": { + "base": null, + "refs": { + "DeregisterTypeInput$Arn": "The Amazon Resource Name (ARN) of the type.
Conditional: You must specify TypeName
or Arn
.
The Amazon Resource Name (ARN) of the type for which you want version summary information.
Conditional: You must specify TypeName
or Arn
.
The Amazon Resource Name (ARN) of the type for which you want version summary information.
Conditional: You must specify TypeName
or Arn
.
The actual property value of the resource property.
" } }, + "ProvisioningType": { + "base": null, + "refs": { + "DescribeTypeOutput$ProvisioningType": "The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.
Valid values include:
FULLY_MUTABLE
: The type includes an update handler to process updates to the type during stack update operations.
IMMUTABLE
: The type does not include an update handler, so the type cannot be updated and must instead be replaced during stack update operations.
NON_PROVISIONABLE
: The type does not include all of the following handlers, and therefore cannot actually be provisioned.
create
read
delete
The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.
Valid values include:
FULLY_MUTABLE
: The type includes an update handler to process updates to the type during stack update operations.
IMMUTABLE
: The type does not include an update handler, so the type cannot be updated and must instead be replaced during stack update operations.
NON_PROVISIONABLE
: The type does not include create, read, and delete handlers, and therefore cannot actually be provisioned.
The reason for the assigned result status.
" } }, + "RecordHandlerProgressInput": { + "base": null, + "refs": { + } + }, + "RecordHandlerProgressOutput": { + "base": null, + "refs": { + } + }, "Region": { "base": null, "refs": { @@ -1152,12 +1360,63 @@ "UpdateStackSetInput$Regions": "The regions in which to update associated stack instances. If you specify regions, you must also specify accounts in which to update stack set instances.
To update all the stack instances associated with this stack set, do not specify the Accounts
or Regions
properties.
If the stack set update includes changes to the template (that is, if the TemplateBody
or TemplateURL
properties are specified), or the Parameters
property, AWS CloudFormation marks all stack instances with a status of OUTDATED
prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.
The current status of the type registration request.
", + "ListTypeRegistrationsInput$RegistrationStatusFilter": "The current status of the type registration request.
" + } + }, + "RegistrationToken": { + "base": null, + "refs": { + "DescribeTypeRegistrationInput$RegistrationToken": "The identifier for this registration request.
This registration token is generated by CloudFormation when you initiate a registration request using RegisterType
.
The identifier for this registration request.
Use this registration token when calling DescribeTypeRegistration
, which returns information about the status and IDs of the type registration.
A list of type registration tokens.
Use DescribeTypeRegistration
to return detailed information about a type registration request.
The kind of type.
Currently the only valid value is RESOURCE
.
The kind of type.
Currently the only valid value is RESOURCE
.
The kind of type.
Currently the only valid value is RESOURCE
.
The kind of type.
Currently the only valid value is RESOURCE
.
The kind of the type.
Currently the only valid value is RESOURCE
.
The kind of type.
Currently, the only valid value is RESOURCE
.
The kind of type.
", + "TypeSummary$Type": "The kind of type.
", + "TypeVersionSummary$Type": "The kind of type.
" + } + }, "Replacement": { "base": null, "refs": { "ResourceChange$Replacement": "For the Modify
action, indicates whether AWS CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation
property in the ResourceTargetDefinition
structure. For example, if the RequiresRecreation
field is Always
and the Evaluation
field is Static
, Replacement
is True
. If the RequiresRecreation
field is Always
and the Evaluation
field is Dynamic
, Replacement
is Conditionally
.
If you have multiple changes with different RequiresRecreation
values, the Replacement
value depends on the change with the most impact. A RequiresRecreation
value of Always
has the most impact, followed by Conditionally
, and then Never
.
A unique identifier that acts as an idempotency key for this registration request. Specifying a client request token prevents CloudFormation from generating more than one version of a type from the same registration request, even if the request is submitted multiple times.
" + } + }, "RequiresRecreation": { "base": null, "refs": { @@ -1226,6 +1485,12 @@ "ResourceIdentifierSummary$ResourceIdentifiers": "The resource properties you can provide during the import to identify your target resources. For example, BucketName
is a possible identifier property for AWS::S3::Bucket
resources.
Reserved for use by the CloudFormation CLI.
" + } + }, "ResourceProperties": { "base": null, "refs": { @@ -1348,6 +1613,14 @@ "UpdateStackSetInput$AdministrationRoleARN": "The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.
Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.
If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.
" } }, + "RoleArn": { + "base": null, + "refs": { + "DescribeTypeOutput$ExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM execution role used to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.
", + "LoggingConfig$LogRoleArn": "The ARN of the role that CloudFormation should assume when sending log entries to CloudWatch logs.
", + "RegisterTypeInput$ExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM execution role to use to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.
" + } + }, "RollbackConfiguration": { "base": "Structure containing the rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
Rollback triggers enable you to have AWS CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.
", "refs": { @@ -1370,6 +1643,12 @@ "RollbackConfiguration$RollbackTriggers": "The triggers to monitor during stack creation or update actions.
By default, AWS CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:
To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.
To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.
To remove all currently specified triggers, specify an empty list for this parameter.
If a specified trigger is missing, the entire stack operation fails and is rolled back.
" } }, + "S3Url": { + "base": null, + "refs": { + "RegisterTypeInput$SchemaHandlerPackage": "A url to the S3 bucket containing the schema handler package that contains the schema, event handlers, and associated files for the type you want to register.
For information on generating a schema handler package for the type you want to register, see submit in the CloudFormation CLI User Guide.
" + } + }, "Scope": { "base": null, "refs": { @@ -1381,6 +1660,16 @@ "refs": { } }, + "SetTypeDefaultVersionInput": { + "base": null, + "refs": { + } + }, + "SetTypeDefaultVersionOutput": { + "base": null, + "refs": { + } + }, "SignalResourceInput": { "base": "The input for the SignalResource action.
", "refs": { @@ -1429,7 +1718,10 @@ "refs": { "DescribeStackDriftDetectionStatusOutput$StackDriftStatus": "Status of the stack's actual configuration compared to its expected configuration.
DRIFTED
: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked if the stack differs from its expected template configuration.
IN_SYNC
: The stack's actual configuration matches its expected template configuration.
UNKNOWN
: This value is reserved for future use.
Status of the stack's actual configuration compared to its expected template configuration.
DRIFTED
: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked if the stack differs from its expected template configuration.
IN_SYNC
: The stack's actual configuration matches its expected template configuration.
UNKNOWN
: This value is reserved for future use.
Status of the stack's actual configuration compared to its expected template configuration.
DRIFTED
: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked if the stack differs from its expected template configuration.
IN_SYNC
: The stack's actual configuration matches its expected template configuration.
UNKNOWN
: This value is reserved for future use.
Status of the stack's actual configuration compared to its expected template configuration.
DRIFTED
: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked if the stack differs from its expected template configuration.
IN_SYNC
: The stack's actual configuration matches its expected template configuration.
UNKNOWN
: This value is reserved for future use.
Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.
DRIFTED
: The stack differs from the expected template and parameter configuration of the stack set to which it belongs. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked if the stack instance differs from its expected stack set configuration.
IN_SYNC
: The stack instance's actual configuration matches its expected stack set configuration.
UNKNOWN
: This value is reserved for future use.
Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.
DRIFTED
: The stack differs from the expected template and parameter configuration of the stack set to which it belongs. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked if the stack instance differs from its expected stack set configuration.
IN_SYNC
: The stack instance's actual configuration matches its expected stack set configuration.
UNKNOWN
: This value is reserved for future use.
Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.
DRIFTED
: One or more of the stack instances belonging to the stack set stack differs from the expected template and parameter configuration. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked the stack set for drift.
IN_SYNC
: All of the stack instances belonging to the stack set stack match from the expected template and parameter configuration.
UNKNOWN
: This value is reserved for future use.
The Amazon Resource Number (ARN) of the stack set.
" } }, + "StackSetDriftDetectionDetails": { + "base": "Detailed information about the drift status of the stack set.
For stack sets, contains information about the last completed drift operation performed on the stack set. Information about drift operations in-progress is not included.
For stack set operations, includes information about drift operations currently being performed on the stack set.
For more information, see Detecting Unmanaged Changes in Stack Sets in the AWS CloudFormation User Guide.
", + "refs": { + "StackSet$StackSetDriftDetectionDetails": "Detailed information about the drift status of the stack set.
For stack sets, contains information about the last completed drift operation performed on the stack set. Information about drift operations currently in progress is not included.
", + "StackSetOperation$StackSetDriftDetectionDetails": "Detailed information about the drift status of the stack set. This includes information about drift operations currently being performed on the stack set.
This information will only be present for stack set operations whose Action
type is DETECT_DRIFT
.
For more information, see Detecting Unmanaged Changes in Stack Sets in the AWS CloudFormation User Guide.
" + } + }, + "StackSetDriftDetectionStatus": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$DriftDetectionStatus": "The status of the stack set drift detection operation.
COMPLETED
: The drift detection operation completed without failing on any stack instances.
FAILED
: The drift detection operation exceeded the specified failure tolerance.
PARTIAL_SUCCESS
: The drift detection operation completed without exceeding the failure tolerance for the operation.
IN_PROGRESS
: The drift detection operation is currently being performed.
STOPPED
: The user has cancelled the drift detection operation.
Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.
DRIFTED
: One or more of the stack instances belonging to the stack set stack differs from the expected template and parameter configuration. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.
NOT_CHECKED
: AWS CloudFormation has not checked the stack set for drift.
IN_SYNC
: All of the stack instances belonging to the stack set stack match from the expected template and parameter configuration.
The name of the stack set on which to perform the drift detection operation.
", "GetTemplateSummaryInput$StackSetName": "The name or unique ID of the stack set from which the stack was created.
Conditional: You must specify only one of the following parameters: StackName
, StackSetName
, TemplateBody
, or TemplateURL
.
The name or unique ID of the stack set associated with the stack instances.
" } @@ -1718,6 +2030,7 @@ "refs": { "CreateStackInstancesInput$OperationPreferences": "Preferences for how AWS CloudFormation performs this stack set operation.
", "DeleteStackInstancesInput$OperationPreferences": "Preferences for how AWS CloudFormation performs this stack set operation.
", + "DetectStackSetDriftInput$OperationPreferences": null, "StackSetOperation$OperationPreferences": "The preferences for how AWS CloudFormation performs this stack set operation.
", "UpdateStackInstancesInput$OperationPreferences": "Preferences for how AWS CloudFormation performs this stack set operation.
", "UpdateStackSetInput$OperationPreferences": "Preferences for how AWS CloudFormation performs this stack set operation.
" @@ -1830,6 +2143,12 @@ "refs": { } }, + "StatusMessage": { + "base": null, + "refs": { + "RecordHandlerProgressInput$StatusMessage": "Reserved for use by the CloudFormation CLI.
" + } + }, "StopStackSetOperationInput": { "base": null, "refs": { @@ -1935,19 +2254,27 @@ "base": null, "refs": { "DescribeStackDriftDetectionStatusOutput$Timestamp": "Time at which the stack drift detection operation was initiated.
", + "DescribeTypeOutput$LastUpdated": "When the specified type version was registered.
", + "DescribeTypeOutput$TimeCreated": "When the specified type version was registered.
", "StackDriftInformation$LastCheckTimestamp": "Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.
", "StackDriftInformationSummary$LastCheckTimestamp": "Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.
", "StackEvent$Timestamp": "Time the status was updated.
", + "StackInstance$LastDriftCheckTimestamp": "Most recent time when CloudFormation performed a drift detection operation on the stack instance. This value will be NULL
for any stack instance on which drift detection has not yet been performed.
Most recent time when CloudFormation performed a drift detection operation on the stack instance. This value will be NULL
for any stack instance on which drift detection has not yet been performed.
Time the status was updated.
", "StackResourceDetail$LastUpdatedTimestamp": "Time the status was updated.
", "StackResourceDrift$Timestamp": "Time at which AWS CloudFormation performed drift detection on the stack resource.
", "StackResourceDriftInformation$LastCheckTimestamp": "When AWS CloudFormation last checked if the resource had drifted from its expected configuration.
", "StackResourceDriftInformationSummary$LastCheckTimestamp": "When AWS CloudFormation last checked if the resource had drifted from its expected configuration.
", "StackResourceSummary$LastUpdatedTimestamp": "Time the status was updated.
", + "StackSetDriftDetectionDetails$LastDriftCheckTimestamp": "Most recent time when CloudFormation performed a drift detection operation on the stack set. This value will be NULL
for any stack set on which drift detection has not yet been performed.
The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.
", "StackSetOperation$EndTimestamp": "The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.
", "StackSetOperationSummary$CreationTimestamp": "The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.
", - "StackSetOperationSummary$EndTimestamp": "The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.
" + "StackSetOperationSummary$EndTimestamp": "The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.
", + "StackSetSummary$LastDriftCheckTimestamp": "Most recent time when CloudFormation performed a drift detection operation on the stack set. This value will be NULL
for any stack set on which drift detection has not yet been performed.
When the current default version of the type was registered.
", + "TypeVersionSummary$TimeCreated": "When the version was registered.
" } }, "TokenAlreadyExistsException": { @@ -1955,6 +2282,12 @@ "refs": { } }, + "TotalStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$TotalStackInstancesCount": "The total number of stack instances belonging to this stack set.
The total number of stack instances is equal to the total of:
Stack instances that match the stack set configuration.
Stack instances that have drifted from the stack set configuration.
Stack instances where the drift detection operation has failed.
Stack instances currently being checked for drift.
The resource type of the rollback trigger. Currently, AWS::CloudWatch::Alarm is the only supported resource type.
" } }, + "TypeArn": { + "base": null, + "refs": { + "DescribeTypeInput$Arn": "The Amazon Resource Name (ARN) of the type.
Conditional: You must specify TypeName
or Arn
.
The Amazon Resource Name (ARN) of the type.
", + "DescribeTypeRegistrationOutput$TypeArn": "The Amazon Resource Name (ARN) of the type being registered.
For registration requests with a ProgressStatus
of other than COMPLETE
, this will be null
.
The Amazon Resource Name (ARN) of this specific version of the type being registered.
For registration requests with a ProgressStatus
of other than COMPLETE
, this will be null
.
The Amazon Resource Name (ARN) of the type.
Conditional: You must specify TypeName
or Arn
.
The Amazon Resource Name (ARN) of the type.
", + "TypeVersionSummary$Arn": "The Amazon Resource Name (ARN) of the type version.
" + } + }, + "TypeName": { + "base": null, + "refs": { + "DeregisterTypeInput$TypeName": "The name of the type.
Conditional: You must specify TypeName
or Arn
.
The name of the type.
Conditional: You must specify TypeName
or Arn
.
The name of the registered type.
", + "ListTypeRegistrationsInput$TypeName": "The name of the type.
Conditional: You must specify TypeName
or Arn
.
The name of the type for which you want version summary information.
Conditional: You must specify TypeName
or Arn
.
The name of the type being registered.
We recommend that type names adhere to the following pattern: company_or_organization::service::type.
The following organization namespaces are reserved and cannot be used in your resource type names:
Alexa
AMZN
Amazon
AWS
Custom
Dev
The name of the type.
Conditional: You must specify TypeName
or Arn
.
The name of the type.
", + "TypeVersionSummary$TypeName": "The name of the type.
" + } + }, + "TypeNotFoundException": { + "base": "The specified type does not exist in the CloudFormation registry.
", + "refs": { + } + }, + "TypeSchema": { + "base": null, + "refs": { + "DescribeTypeOutput$Schema": "The schema that defines the type.
For more information on type schemas, see Resource Provider Schema in the CloudFormation CLI User Guide.
" + } + }, + "TypeSummaries": { + "base": null, + "refs": { + "ListTypesOutput$TypeSummaries": "A list of TypeSummary
structures that contain information about the specified types.
Contains summary information about the specified CloudFormation type.
", + "refs": { + "TypeSummaries$member": null + } + }, + "TypeVersionId": { + "base": null, + "refs": { + "DeregisterTypeInput$VersionId": "The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
", + "DescribeTypeInput$VersionId": "The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
If you specify a VersionId
, DescribeType
returns information about that specific type version. Otherwise, it returns information about the default type version.
The ID of the default version of the type. The default version is used when the type version is not specified.
To set the default version of a type, use SetTypeDefaultVersion
.
The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
", + "TypeSummary$DefaultVersionId": "The ID of the default version of the type. The default version is used when the type version is not specified.
To set the default version of a type, use SetTypeDefaultVersion
.
The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
" + } + }, + "TypeVersionSummaries": { + "base": null, + "refs": { + "ListTypeVersionsOutput$TypeVersionSummaries": "A list of TypeVersionSummary
structures that contain information about the specified type's versions.
Contains summary information about a specific version of a CloudFormation type.
", + "refs": { + "TypeVersionSummaries$member": null + } + }, "UpdateStackInput": { "base": "The input for an UpdateStack action.
", "refs": { @@ -2055,6 +2460,13 @@ "refs": { "GetTemplateSummaryOutput$Version": "The AWS template format version, which identifies the capabilities of the template.
" } + }, + "Visibility": { + "base": null, + "refs": { + "DescribeTypeOutput$Visibility": "The scope at which the type is visible and usable in CloudFormation operations.
Valid values include:
PRIVATE
: The type is only visible and usable within the account in which it is registered. Currently, AWS CloudFormation marks any types you register as PRIVATE
.
PUBLIC
: The type is publically visible and usable within any Amazon account.
The scope at which the type is visible and usable in CloudFormation operations.
Valid values include:
PRIVATE
: The type is only visible and usable within the account in which it is registered. Currently, AWS CloudFormation marks any types you create as PRIVATE
.
PUBLIC
: The type is publically visible and usable within any Amazon account.
You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action.
The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.
", "operations": { "BuildSuggesters": "Indexes the search suggestions. For more information, see Configuring Suggesters in the Amazon CloudSearch Developer Guide.
", "CreateDomain": "Creates a new search domain. For more information, see Creating a Search Domain in the Amazon CloudSearch Developer Guide.
", @@ -14,6 +15,7 @@ "DeleteSuggester": "Deletes a suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.
", "DescribeAnalysisSchemes": "Gets the analysis schemes configured for a domain. An analysis scheme defines language-specific text processing options for a text
field. Can be limited to specific analysis schemes by name. By default, shows all analysis schemes and includes any pending changes to the configuration. Set the Deployed
option to true
to show the active configuration and exclude pending changes. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.
Gets the availability options configured for a domain. By default, shows the configuration with any pending changes. Set the Deployed
option to true
to show the active configuration and exclude pending changes. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.
Returns the domain's endpoint options, specifically whether all requests to the domain must arrive over HTTPS. For more information, see Configuring Domain Endpoint Options in the Amazon CloudSearch Developer Guide.
", "DescribeDomains": "Gets information about the search domains owned by this account. Can be limited to specific domains. Shows all domains by default. To get the number of searchable documents in a domain, use the console or submit a matchall
request to your domain's search endpoint: q=matchall&q.parser=structured&size=0
. For more information, see Getting Information about a Search Domain in the Amazon CloudSearch Developer Guide.
Gets the expressions configured for the search domain. Can be limited to specific expressions by name. By default, shows all expressions and includes any pending changes to the configuration. Set the Deployed
option to true
to show the active configuration and exclude pending changes. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.
Gets information about the index fields configured for the search domain. Can be limited to specific fields by name. By default, shows all fields and includes any pending changes to the configuration. Set the Deployed
option to true
to show the active configuration and exclude pending changes. For more information, see Getting Domain Information in the Amazon CloudSearch Developer Guide.
Tells the search domain to start indexing its documents using the latest indexing options. This operation must be invoked to activate options whose OptionStatus is RequiresIndexDocuments
.
Lists all search domains owned by an account.
", "UpdateAvailabilityOptions": "Configures the availability options for a domain. Enabling the Multi-AZ option expands an Amazon CloudSearch domain to an additional Availability Zone in the same Region to increase fault tolerance in the event of a service disruption. Changes to the Multi-AZ option can take about half an hour to become active. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.
", + "UpdateDomainEndpointOptions": "Updates the domain's endpoint options, specifically whether all requests to the domain must arrive over HTTPS. For more information, see Configuring Domain Endpoint Options in the Amazon CloudSearch Developer Guide.
", "UpdateScalingParameters": "Configures scaling parameters for a domain. A domain's scaling parameters specify the desired search instance type and replication count. Amazon CloudSearch will still automatically scale your domain based on the volume of data and traffic, but not below the desired instance type and replication count. If the Multi-AZ option is enabled, these values control the resources used per Availability Zone. For more information, see Configuring Scaling Options in the Amazon CloudSearch Developer Guide.
", "UpdateServiceAccessPolicies": "Configures the access rules that control access to the domain's document and search endpoints. For more information, see Configuring Access for an Amazon CloudSearch Domain.
" }, - "service": "You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action.
The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.
", "shapes": { "APIVersion": { "base": "The Amazon CloudSearch API version for a domain: 2011-02-01 or 2013-01-01.
", @@ -110,10 +112,12 @@ "DateOptions$SortEnabled": "Whether the field can be used to sort the search results.
", "DescribeAnalysisSchemesRequest$Deployed": "Whether to display the deployed configuration (true
) or include any pending changes (false
). Defaults to false
.
Whether to display the deployed configuration (true
) or include any pending changes (false
). Defaults to false
.
Whether to retrieve the latest configuration (which might be in a Processing state) or the current, active configuration. Defaults to false
.
Whether to display the deployed configuration (true
) or include any pending changes (false
). Defaults to false
.
Whether to display the deployed configuration (true
) or include any pending changes (false
). Defaults to false
.
Whether to display the deployed configuration (true
) or include any pending changes (false
). Defaults to false
.
Whether to display the deployed configuration (true
) or include any pending changes (false
). Defaults to false
.
Whether the domain is HTTPS only enabled.
", "DomainStatus$Created": "True if the search domain is created. It can take several minutes to initialize a domain when CreateDomain is called. Newly created search domains are returned from DescribeDomains with a false value for Created until domain creation is complete.
", "DomainStatus$Deleted": "True if the search domain has been deleted. The system must clean up resources dedicated to the search domain when DeleteDomain is called. Newly deleted search domains are returned from DescribeDomains with a true value for IsDeleted for several minutes until resource cleanup is complete.
", "DomainStatus$RequiresIndexDocuments": "True if IndexDocuments needs to be called to activate the current domain configuration.
", @@ -294,6 +298,16 @@ "refs": { } }, + "DescribeDomainEndpointOptionsRequest": { + "base": "Container for the parameters to the DescribeDomainEndpointOptions
operation. Specify the name of the domain you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true
.
The result of a DescribeDomainEndpointOptions
request. Contains the status and configuration of a search domain's endpoint options.
Container for the parameters to the DescribeDomains
operation. By default shows the status of all domains. To restrict the response to particular domains, specify the names of the domains you want to describe.
The domain's endpoint options.
", + "refs": { + "DomainEndpointOptionsStatus$Options": "The domain endpoint options configured for the domain.
", + "UpdateDomainEndpointOptionsRequest$DomainEndpointOptions": "Whether to require that all requests to the domain arrive over HTTPS. We recommend Policy-Min-TLS-1-2-2019-07 for TLSSecurityPolicy. For compatibility with older clients, the default is Policy-Min-TLS-1-0-2019-07.
" + } + }, + "DomainEndpointOptionsStatus": { + "base": "The configuration and status of the domain's endpoint options.
", + "refs": { + "DescribeDomainEndpointOptionsResponse$DomainEndpointOptions": "The status and configuration of a search domain's endpoint options.
", + "UpdateDomainEndpointOptionsResponse$DomainEndpointOptions": "The newly-configured domain endpoint options.
" + } + }, "DomainId": { "base": "An internally generated unique identifier for a domain.
", "refs": { @@ -387,6 +415,7 @@ "DeleteSuggesterRequest$DomainName": null, "DescribeAnalysisSchemesRequest$DomainName": "The name of the domain you want to describe.
", "DescribeAvailabilityOptionsRequest$DomainName": "The name of the domain you want to describe.
", + "DescribeDomainEndpointOptionsRequest$DomainName": "A string that represents the name of a domain.
", "DescribeExpressionsRequest$DomainName": "The name of the domain you want to describe.
", "DescribeIndexFieldsRequest$DomainName": "The name of the domain you want to describe.
", "DescribeScalingParametersRequest$DomainName": null, @@ -397,6 +426,7 @@ "DomainStatus$DomainName": null, "IndexDocumentsRequest$DomainName": null, "UpdateAvailabilityOptionsRequest$DomainName": null, + "UpdateDomainEndpointOptionsRequest$DomainName": "A string that represents the name of a domain.
", "UpdateScalingParametersRequest$DomainName": null, "UpdateServiceAccessPoliciesRequest$DomainName": null } @@ -668,7 +698,7 @@ "OptionState": { "base": "The state of processing a change to an option. One of:
The state of processing a change to an option. Possible values:
RequiresIndexDocuments
: the option's latest value will not be deployed until IndexDocuments has been called and indexing is complete.Processing
: the option's latest value is in the process of being activated. Active
: the option's latest value is completely deployed.FailedToValidate
: the option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents.The state of processing a change to an option. Possible values:
RequiresIndexDocuments
: the option's latest value will not be deployed until IndexDocuments has been called and indexing is complete.Processing
: the option's latest value is in the process of being activated. Active
: the option's latest value is completely deployed.FailedToValidate
: the option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents.The status of the configured domain endpoint options.
", "ExpressionStatus$Status": null, "IndexFieldStatus$Status": null, "ScalingParametersStatus$Status": null, @@ -797,6 +828,12 @@ "DescribeSuggestersResponse$Suggesters": "The suggesters configured for the domain specified in the request.
" } }, + "TLSSecurityPolicy": { + "base": "The minimum required TLS version.
", + "refs": { + "DomainEndpointOptions$TLSSecurityPolicy": "The minimum required TLS version
" + } + }, "TextArrayOptions": { "base": "Options for a field that contains an array of text strings. Present if IndexFieldType
specifies the field is of type text-array
. A text-array
field is always searchable. All options are enabled by default.
Container for the parameters to the UpdateDomainEndpointOptions
operation. Specifies the name of the domain you want to update and the domain endpoint options.
The result of a UpdateDomainEndpointOptions
request. Contains the configuration and status of the domain's endpoint options.
Container for the parameters to the UpdateScalingParameters
operation. Specifies the name of the domain you want to update and the scaling parameters you want to configure.
A timestamp for when this option was last updated.
" } }, + "ValidationException": { + "base": "The request was rejected because it has invalid parameters.
", + "refs": { + } + }, "Word": { "base": null, "refs": { diff --git a/models/apis/cloudsearch/2013-01-01/examples-1.json b/models/apis/cloudsearch/2013-01-01/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/cloudsearch/2013-01-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/cloudsearch/2013-01-01/paginators-1.json b/models/apis/cloudsearch/2013-01-01/paginators-1.json index 82fa804ab75..fc7e95d086e 100644 --- a/models/apis/cloudsearch/2013-01-01/paginators-1.json +++ b/models/apis/cloudsearch/2013-01-01/paginators-1.json @@ -16,5 +16,4 @@ "result_key": "Suggesters" } } -} - +} \ No newline at end of file diff --git a/models/apis/cloudsearch/2013-01-01/smoke.json b/models/apis/cloudsearch/2013-01-01/smoke.json new file mode 100644 index 00000000000..04457b247af --- /dev/null +++ b/models/apis/cloudsearch/2013-01-01/smoke.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "defaultRegion": "us-west-2", + "testCases": [ + { + "operationName": "DescribeDomains", + "input": {}, + "errorExpectedFromService": false + }, + { + "operationName": "DescribeIndexFields", + "input": { + "DomainName": "fakedomain" + }, + "errorExpectedFromService": true + } + ] +} diff --git a/models/apis/cloudtrail/2013-11-01/api-2.json b/models/apis/cloudtrail/2013-11-01/api-2.json index f6afa05526c..da8b8d47ae9 100644 --- a/models/apis/cloudtrail/2013-11-01/api-2.json +++ b/models/apis/cloudtrail/2013-11-01/api-2.json @@ -102,7 +102,8 @@ "output":{"shape":"DescribeTrailsResponse"}, "errors":[ {"shape":"UnsupportedOperationException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"InvalidTrailNameException"} ], "idempotent":true }, @@ -122,6 +123,23 @@ ], "idempotent":true }, + "GetInsightSelectors":{ + "name":"GetInsightSelectors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInsightSelectorsRequest"}, + 
"output":{"shape":"GetInsightSelectorsResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"InsightNotEnabledException"} + ], + "idempotent":true + }, "GetTrail":{ "name":"GetTrail", "http":{ @@ -148,7 +166,9 @@ "output":{"shape":"GetTrailStatusResponse"}, "errors":[ {"shape":"TrailNotFoundException"}, - {"shape":"InvalidTrailNameException"} + {"shape":"InvalidTrailNameException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} ], "idempotent":true }, @@ -213,7 +233,10 @@ {"shape":"InvalidLookupAttributesException"}, {"shape":"InvalidTimeRangeException"}, {"shape":"InvalidMaxResultsException"}, - {"shape":"InvalidNextTokenException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidEventCategoryException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} ], "idempotent":true }, @@ -237,6 +260,27 @@ ], "idempotent":true }, + "PutInsightSelectors":{ + "name":"PutInsightSelectors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutInsightSelectorsRequest"}, + "output":{"shape":"PutInsightSelectorsResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"}, + {"shape":"InvalidInsightSelectorsException"}, + {"shape":"InsufficientS3BucketPolicyException"}, + {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"NotOrganizationMasterAccountException"} + ], + "idempotent":true + }, "RemoveTags":{ "name":"RemoveTags", "http":{ @@ -315,6 +359,7 @@ {"shape":"InvalidKmsKeyIdException"}, {"shape":"InvalidTrailNameException"}, {"shape":"TrailNotProvidedException"}, + {"shape":"InvalidEventSelectorsException"}, 
{"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"KmsKeyNotFoundException"}, @@ -465,12 +510,17 @@ "CloudTrailEvent":{"shape":"String"} } }, + "EventCategory":{ + "type":"string", + "enum":["insight"] + }, "EventSelector":{ "type":"structure", "members":{ "ReadWriteType":{"shape":"ReadWriteType"}, "IncludeManagementEvents":{"shape":"Boolean"}, - "DataResources":{"shape":"DataResources"} + "DataResources":{"shape":"DataResources"}, + "ExcludeManagementEventSources":{"shape":"ExcludeManagementEventSources"} } }, "EventSelectors":{ @@ -481,6 +531,10 @@ "type":"list", "member":{"shape":"Event"} }, + "ExcludeManagementEventSources":{ + "type":"list", + "member":{"shape":"String"} + }, "GetEventSelectorsRequest":{ "type":"structure", "required":["TrailName"], @@ -495,6 +549,20 @@ "EventSelectors":{"shape":"EventSelectors"} } }, + "GetInsightSelectorsRequest":{ + "type":"structure", + "required":["TrailName"], + "members":{ + "TrailName":{"shape":"String"} + } + }, + "GetInsightSelectorsResponse":{ + "type":"structure", + "members":{ + "TrailARN":{"shape":"String"}, + "InsightSelectors":{"shape":"InsightSelectors"} + } + }, "GetTrailRequest":{ "type":"structure", "required":["Name"], @@ -537,6 +605,26 @@ "TimeLoggingStopped":{"shape":"String"} } }, + "InsightNotEnabledException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsightSelector":{ + "type":"structure", + "members":{ + "InsightType":{"shape":"InsightType"} + } + }, + "InsightSelectors":{ + "type":"list", + "member":{"shape":"InsightSelector"} + }, + "InsightType":{ + "type":"string", + "enum":["ApiCallRateInsight"] + }, "InsufficientDependencyServiceAccessPermissionException":{ "type":"structure", "members":{ @@ -573,6 +661,12 @@ }, "exception":true }, + "InvalidEventCategoryException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidEventSelectorsException":{ "type":"structure", "members":{ @@ -585,6 +679,12 @@ 
}, "exception":true }, + "InvalidInsightSelectorsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidKmsKeyIdException":{ "type":"structure", "members":{ @@ -753,6 +853,7 @@ "LookupAttributes":{"shape":"LookupAttributesList"}, "StartTime":{"shape":"Date"}, "EndTime":{"shape":"Date"}, + "EventCategory":{"shape":"EventCategory"}, "MaxResults":{"shape":"MaxResults"}, "NextToken":{"shape":"NextToken"} } @@ -831,6 +932,24 @@ "EventSelectors":{"shape":"EventSelectors"} } }, + "PutInsightSelectorsRequest":{ + "type":"structure", + "required":[ + "TrailName", + "InsightSelectors" + ], + "members":{ + "TrailName":{"shape":"String"}, + "InsightSelectors":{"shape":"InsightSelectors"} + } + }, + "PutInsightSelectorsResponse":{ + "type":"structure", + "members":{ + "TrailARN":{"shape":"String"}, + "InsightSelectors":{"shape":"InsightSelectors"} + } + }, "ReadWriteType":{ "type":"string", "enum":[ @@ -959,6 +1078,7 @@ "CloudWatchLogsRoleArn":{"shape":"String"}, "KmsKeyId":{"shape":"String"}, "HasCustomEventSelectors":{"shape":"Boolean"}, + "HasInsightSelectors":{"shape":"Boolean"}, "IsOrganizationTrail":{"shape":"Boolean"} } }, diff --git a/models/apis/cloudtrail/2013-11-01/docs-2.json b/models/apis/cloudtrail/2013-11-01/docs-2.json index 65d2ca4fa43..548cf6702d0 100644 --- a/models/apis/cloudtrail/2013-11-01/docs-2.json +++ b/models/apis/cloudtrail/2013-11-01/docs-2.json @@ -7,13 +7,15 @@ "DeleteTrail": "Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail
cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.
Retrieves settings for one or more trails associated with the current region for your account.
", "GetEventSelectors": "Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following:
If your event selector includes read-only events, write-only events, or all events. This applies to both management events and data events.
If your event selector includes management events.
If your event selector includes data events, the Amazon S3 objects or AWS Lambda functions that you are logging for data events.
For more information, see Logging Data and Management Events for Trails in the AWS CloudTrail User Guide.
", + "GetInsightSelectors": "Describes the settings for the Insights event selectors that you configured for your trail. GetInsightSelectors
shows if CloudTrail Insights event logging is enabled on the trail, and if it is, which insight types are enabled. If you run GetInsightSelectors
on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException
For more information, see Logging CloudTrail Insights Events for Trails in the AWS CloudTrail User Guide.
", "GetTrail": "Returns settings information for a specified trail.
", "GetTrailStatus": "Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region.
", "ListPublicKeys": "Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.
CloudTrail uses different private/public key pairs per region. Each digest file is signed with a private key unique to its region. Therefore, when you validate a digest file from a particular region, you must look in the same region for its corresponding public key.
Lists the tags for the trail in the current region.
", "ListTrails": "Lists trails that are in the current account.
", - "LookupEvents": "Looks up management events captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes:
AWS access key
Event ID
Event name
Event source
Read only
Resource name
Resource type
User name
All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.
The rate of lookup requests is limited to one per second per account. If this limit is exceeded, a throttling error occurs.
Events that occurred during the selected time range will not be available for lookup if CloudTrail logging was not enabled when the events occurred.
Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:
AWS access key
Event ID
Event name
Event source
Read only
Resource name
Resource type
User name
Lookup supports the following attributes for Insights events:
Event ID
Event name
Event source
All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.
The rate of lookup requests is limited to two per second per account. If this limit is exceeded, a throttling error occurs.
Configures an event selector for your trail. Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events.
When an event occurs in your account, CloudTrail evaluates the event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
Example
You create an event selector for a trail and specify that you want write-only events.
The EC2 GetConsoleOutput
and RunInstances
API operations occur in your account.
CloudTrail evaluates whether the events match your event selectors.
The RunInstances
is a write-only event and it matches your event selector. The trail logs the event.
The GetConsoleOutput
is a read-only event but it doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors
operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException
is thrown.
You can configure up to five event selectors for each trail. For more information, see Logging Data and Management Events for Trails and Limits in AWS CloudTrail in the AWS CloudTrail User Guide.
", + "PutInsightSelectors": "Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors
to turn off Insights event logging, by passing an empty list of insight types. In this release, only ApiCallRateInsight
is supported as an Insights selector.
Removes the specified tags from a trail.
", "StartLogging": "Starts the recording of AWS API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.
", "StopLogging": "Suspends the recording of AWS API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException
will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions.
Specifies whether the trail exists only in one region or exists in all regions.
", "Trail$LogFileValidationEnabled": "Specifies whether log file validation is enabled.
", "Trail$HasCustomEventSelectors": "Specifies if the trail has custom event selectors.
", + "Trail$HasInsightSelectors": "Specifies whether a trail has insight types specified in an InsightSelector
list.
Specifies whether the trail is an organization trail.
", "UpdateTrailRequest$IncludeGlobalServiceEvents": "Specifies whether the trail is publishing events from global services such as IAM to the log files.
", "UpdateTrailRequest$IsMultiRegionTrail": "Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted. As a best practice, consider using trails that log events in all regions.
", @@ -91,7 +94,7 @@ } }, "DataResource": { - "base": "The Amazon S3 buckets or AWS Lambda functions that you specify in your event selectors for your trail to log data events. Data events provide insight into the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.
The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.
The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1
. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read
and Write
data events.
A user uploads an image file to bucket-1
.
The PutObject
API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.
A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2
.
The PutObject
API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.
The following example demonstrates how logging works when you configure logging of AWS Lambda data events for a Lambda function named MyLambdaFunction, but not for all AWS Lambda functions.
A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.
The Invoke
API operation on MyLambdaFunction is an AWS Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.
The Invoke
API operation on MyOtherLambdaFunction is an AWS Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke
operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.
The Amazon S3 buckets or AWS Lambda functions that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.
The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.
The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1
. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read
and Write
data events.
A user uploads an image file to bucket-1
.
The PutObject
API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.
A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2
.
The PutObject
API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.
The following example demonstrates how logging works when you configure logging of AWS Lambda data events for a Lambda function named MyLambdaFunction, but not for all AWS Lambda functions.
A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.
The Invoke
API operation on MyLambdaFunction is an AWS Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.
The Invoke
API operation on MyOtherLambdaFunction is an AWS Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke
operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.
Specifies the event category. If you do not specify an event category, events of the category are not returned in the response. For example, if you do not specify insight
as the value of EventCategory
, no Insights events are returned.
Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
You can configure up to five event selectors for a trail.
", "refs": { @@ -172,6 +181,12 @@ "LookupEventsResponse$Events": "A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.
" } }, + "ExcludeManagementEventSources": { + "base": null, + "refs": { + "EventSelector$ExcludeManagementEventSources": "An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service events by containing \"kms.amazonaws.com\"
. By default, ExcludeManagementEventSources
is empty, and AWS KMS events are included in events that are logged to your trail.
If you run GetInsightSelectors
on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException
.
A JSON string that contains a list of insight types that are logged on a trail.
", + "refs": { + "InsightSelectors$member": null + } + }, + "InsightSelectors": { + "base": null, + "refs": { + "GetInsightSelectorsResponse$InsightSelectors": "A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
The type of insights to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
This exception is thrown when the IAM user or role that is used to create the organization trail is lacking one or more required permissions for creating an organization trail in a required service. For more information, see Prepare For Creating a Trail For Your Organization.
", "refs": { @@ -232,6 +282,11 @@ "refs": { } }, + "InvalidEventCategoryException": { + "base": "Occurs if an event category that is not valid is specified as a value of EventCategory
.
This exception is thrown when the PutEventSelectors
operation is called with a number of event selectors or data resources that is not valid. The combination of event selectors and data resources is not valid. A trail can have up to 5 event selectors. A trail is limited to 250 data resources. These data resources can be distributed across event selectors, but the overall total cannot exceed 250.
You can:
Specify a valid number of event selectors (1 to 5) for a trail.
Specify a valid number of data resources (1 to 250) for an event selector. The limit of number of resources on an individual event selector is configurable up to 250. However, this upper limit is allowed only if the total number of data resources does not exceed 250 across all event selectors for a trail.
Specify a valid value for a parameter. For example, specifying the ReadWriteType
parameter with a value of read-only
is invalid.
The formatting or syntax of the InsightSelectors
JSON statement in your PutInsightSelectors
or GetInsightSelectors
request is not valid, or the specified insight type in the InsightSelectors
statement is not a valid insight type.
This exception is thrown when the KMS key ARN is invalid.
", "refs": { @@ -435,6 +495,16 @@ "refs": { } }, + "PutInsightSelectorsRequest": { + "base": null, + "refs": { + } + }, + "PutInsightSelectorsResponse": { + "base": null, + "refs": { + } + }, "ReadWriteType": { "base": null, "refs": { @@ -546,8 +616,11 @@ "Event$EventSource": "The AWS service that the request was made to.
", "Event$Username": "A user name or role name of the requester that called the API in the event returned.
", "Event$CloudTrailEvent": "A JSON string that contains a representation of the event returned.
", + "ExcludeManagementEventSources$member": null, "GetEventSelectorsRequest$TrailName": "Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
Start with a letter or number, and end with a letter or number
Be between 3 and 128 characters
Have no adjacent periods, underscores or dashes. Names like my-_namespace
and my--namespace
are not valid.
Not be in IP address format (for example, 192.168.5.4)
If you specify a trail ARN, it must be in the format:
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The specified trail ARN that has the event selectors.
", + "GetInsightSelectorsRequest$TrailName": "Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
Start with a letter or number, and end with a letter or number
Be between 3 and 128 characters
Have no adjacent periods, underscores or dashes. Names like my-_namespace
and my--namespace
are not valid.
Not be in IP address format (for example, 192.168.5.4)
If you specify a trail ARN, it must be in the format:
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The Amazon Resource Name (ARN) of a trail for which you want to get Insights selectors.
", "GetTrailRequest$Name": "The name or the Amazon Resource Name (ARN) of the trail for which you want to retrieve settings information.
", "GetTrailStatusRequest$Name": "Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The format of a trail ARN is:
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information see the topic Error Responses in the Amazon S3 API Reference.
This error occurs only when there is a problem with the destination S3 bucket and will not occur for timeouts. To resolve the issue, create a new bucket and call UpdateTrail
to specify the new bucket, or fix the existing objects so that CloudTrail can again write to the bucket.
Reserved for future use.
", "ListTagsRequest$NextToken": "Reserved for future use.
", "ListTagsResponse$NextToken": "Reserved for future use.
", - "ListTrailsRequest$NextToken": null, - "ListTrailsResponse$NextToken": null, + "ListTrailsRequest$NextToken": "The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.
", + "ListTrailsResponse$NextToken": "The token to use to get the next page of results after a previous API call. If the token does not appear, there are no more results to return. The token must be passed in with the same parameters as the previous call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.
", "LookupAttribute$AttributeValue": "Specifies a value for the specified AttributeKey.
", "PublicKey$Fingerprint": "The fingerprint of the public key.
", "PutEventSelectorsRequest$TrailName": "Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
Start with a letter or number, and end with a letter or number
Be between 3 and 128 characters
Have no adjacent periods, underscores or dashes. Names like my-_namespace
and my--namespace
are invalid.
Not be in IP address format (for example, 192.168.5.4)
If you specify a trail ARN, it must be in the format:
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Specifies the ARN of the trail that was updated with event selectors. The format of a trail ARN is:
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The name of the CloudTrail trail for which you want to change or add Insights selectors.
", + "PutInsightSelectorsResponse$TrailARN": "The Amazon Resource Name (ARN) of a trail for which you want to change or add Insights selectors.
", "RemoveTagsRequest$ResourceId": "Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is:
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The type of a resource referenced by the event returned. When the resource type cannot be determined, null is returned. Some examples of resource types are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey for IAM. To learn more about how to look up and filter events by the resource types supported for a service, see Filtering CloudTrail Events.
", "Resource$ResourceName": "The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be \"auto-scaling-test-group\" for an Auto Scaling Group or \"i-1234567\" for an EC2 Instance.
", @@ -652,7 +727,7 @@ "TrailList": { "base": null, "refs": { - "DescribeTrailsResponse$trailList": "The list of trail objects.
" + "DescribeTrailsResponse$trailList": "The list of trail objects. Trail objects with string values are only returned if values for the objects exist in a trail's configuration. For example, SNSTopicName
and SNSTopicARN
are only returned in results if a trail is configured to send SNS notifications. Similarly, KMSKeyId
only appears in results if a trail's log files are encrypted with AWS KMS-managed keys.
AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.
AWS CodeBuild supports these operations:
BatchDeleteBuilds
: Deletes one or more builds.
BatchGetProjects
: Gets information about one or more build projects. A build project defines how AWS CodeBuild runs a build. This includes information such as where to get the source code to build, the build environment to use, the build commands to run, and where to store the build output. A build environment is a representation of operating system, programming language runtime, and tools that AWS CodeBuild uses to run a build. You can add tags to build projects to help manage your resources and costs.
CreateProject
: Creates a build project.
CreateWebhook
: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.
UpdateWebhook
: Changes the settings of an existing webhook.
DeleteProject
: Deletes a build project.
DeleteWebhook
: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.
ListProjects
: Gets a list of build project names, with each build project name representing a single build project.
UpdateProject
: Changes the settings of an existing build project.
BatchGetBuilds
: Gets information about one or more builds.
ListBuilds
: Gets a list of build IDs, with each build ID representing a single build.
ListBuildsForProject
: Gets a list of build IDs for the specified build project, with each build ID representing a single build.
StartBuild
: Starts running a build.
StopBuild
: Attempts to stop running a build.
ListCuratedEnvironmentImages
: Gets information about Docker images that are managed by AWS CodeBuild.
DeleteSourceCredentials
: Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.
ImportSourceCredentials
: Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
ListSourceCredentials
: Returns a list of SourceCredentialsInfo
objects. Each SourceCredentialsInfo
object includes the authentication type, token ARN, and type of source provider for one set of credentials.
AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.
AWS CodeBuild supports these operations:
BatchDeleteBuilds
: Deletes one or more builds.
BatchGetBuilds
: Gets information about one or more builds.
BatchGetProjects
: Gets information about one or more build projects. A build project defines how AWS CodeBuild runs a build. This includes information such as where to get the source code to build, the build environment to use, the build commands to run, and where to store the build output. A build environment is a representation of operating system, programming language runtime, and tools that AWS CodeBuild uses to run a build. You can add tags to build projects to help manage your resources and costs.
CreateProject
: Creates a build project.
CreateWebhook
: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.
DeleteProject
: Deletes a build project.
DeleteSourceCredentials
: Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.
DeleteWebhook
: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.
ImportSourceCredentials
: Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
InvalidateProjectCache
: Resets the cache for a project.
ListBuilds
: Gets a list of build IDs, with each build ID representing a single build.
ListBuildsForProject
: Gets a list of build IDs for the specified build project, with each build ID representing a single build.
ListCuratedEnvironmentImages
: Gets information about Docker images that are managed by AWS CodeBuild.
ListProjects
: Gets a list of build project names, with each build project name representing a single build project.
ListSourceCredentials
: Returns a list of SourceCredentialsInfo
objects. Each SourceCredentialsInfo
object includes the authentication type, token ARN, and type of source provider for one set of credentials.
StartBuild
: Starts running a build.
StopBuild
: Attempts to stop running a build.
UpdateProject
: Changes the settings of an existing build project.
UpdateWebhook
: Changes the settings of an existing webhook.
Deletes one or more builds.
", - "BatchGetBuilds": "Gets information about builds.
", - "BatchGetProjects": "Gets information about build projects.
", + "BatchGetBuilds": "Gets information about one or more builds.
", + "BatchGetProjects": "Gets information about one or more build projects.
", "CreateProject": "Creates a build project.
", "CreateWebhook": "For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.
If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.
Deletes a build project.
", @@ -181,7 +181,7 @@ "ComputeType": { "base": null, "refs": { - "ProjectEnvironment$computeType": "Information about the compute resources the build project uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 15 GB memory and 8 vCPUs for builds.
For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.
", + "ProjectEnvironment$computeType": "Information about the compute resources the build project uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.
", "StartBuildInput$computeTypeOverride": "The name of a compute type for this build that overrides the one specified in the build project.
" } }, @@ -280,7 +280,7 @@ "EnvironmentType": { "base": null, "refs": { - "ProjectEnvironment$type": "The type of build environment to use for related builds.
", + "ProjectEnvironment$type": "The type of build environment to use for related builds.
The environment type ARM_CONTAINER
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).
The environment type LINUX_CONTAINER
with compute type build.general1.2xlarge
is available only in regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).
The environment type LINUX_GPU_CONTAINER
is available only in regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China (Beijing), and China (Ningxia).
A container type for this build that overrides the one specified in the build project.
" } }, diff --git a/models/apis/codecommit/2015-04-13/api-2.json b/models/apis/codecommit/2015-04-13/api-2.json index bc89b924f5a..64c3aa20d82 100644 --- a/models/apis/codecommit/2015-04-13/api-2.json +++ b/models/apis/codecommit/2015-04-13/api-2.json @@ -13,6 +13,49 @@ "uid":"codecommit-2015-04-13" }, "operations":{ + "AssociateApprovalRuleTemplateWithRepository":{ + "name":"AssociateApprovalRuleTemplateWithRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateApprovalRuleTemplateWithRepositoryInput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"MaximumRuleTemplatesAssociatedWithRepositoryException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "BatchAssociateApprovalRuleTemplateWithRepositories":{ + "name":"BatchAssociateApprovalRuleTemplateWithRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesInput"}, + "output":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"RepositoryNamesRequiredException"}, + {"shape":"MaximumRepositoryNamesExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + 
{"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "BatchDescribeMergeConflicts":{ "name":"BatchDescribeMergeConflicts", "http":{ @@ -45,6 +88,27 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "BatchDisassociateApprovalRuleTemplateFromRepositories":{ + "name":"BatchDisassociateApprovalRuleTemplateFromRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesInput"}, + "output":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"RepositoryNamesRequiredException"}, + {"shape":"MaximumRepositoryNamesExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "BatchGetCommits":{ "name":"BatchGetCommits", "http":{ @@ -85,6 +149,24 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "CreateApprovalRuleTemplate":{ + "name":"CreateApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApprovalRuleTemplateInput"}, + "output":{"shape":"CreateApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameAlreadyExistsException"}, + {"shape":"ApprovalRuleTemplateContentRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateContentException"}, + {"shape":"InvalidApprovalRuleTemplateDescriptionException"}, + {"shape":"NumberOfRuleTemplatesExceededException"} + ] + }, "CreateBranch":{ "name":"CreateBranch", "http":{ @@ -194,6 +276,32 
@@ {"shape":"SourceAndDestinationAreSameException"} ] }, + "CreatePullRequestApprovalRule":{ + "name":"CreatePullRequestApprovalRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePullRequestApprovalRuleInput"}, + "output":{"shape":"CreatePullRequestApprovalRuleOutput"}, + "errors":[ + {"shape":"ApprovalRuleNameRequiredException"}, + {"shape":"InvalidApprovalRuleNameException"}, + {"shape":"ApprovalRuleNameAlreadyExistsException"}, + {"shape":"ApprovalRuleContentRequiredException"}, + {"shape":"InvalidApprovalRuleContentException"}, + {"shape":"NumberOfRulesExceededException"}, + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "CreateRepository":{ "name":"CreateRepository", "http":{ @@ -266,6 +374,20 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "DeleteApprovalRuleTemplate":{ + "name":"DeleteApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApprovalRuleTemplateInput"}, + "output":{"shape":"DeleteApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateInUseException"} + ] + }, "DeleteBranch":{ "name":"DeleteBranch", "http":{ @@ -336,6 +458,29 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "DeletePullRequestApprovalRule":{ + "name":"DeletePullRequestApprovalRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePullRequestApprovalRuleInput"}, + "output":{"shape":"DeletePullRequestApprovalRuleOutput"}, + "errors":[ + 
{"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"ApprovalRuleNameRequiredException"}, + {"shape":"InvalidApprovalRuleNameException"}, + {"shape":"CannotDeleteApprovalRuleFromTemplateException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "DeleteRepository":{ "name":"DeleteRepository", "http":{ @@ -412,6 +557,63 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "DisassociateApprovalRuleTemplateFromRepository":{ + "name":"DisassociateApprovalRuleTemplateFromRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateApprovalRuleTemplateFromRepositoryInput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "EvaluatePullRequestApprovalRules":{ + "name":"EvaluatePullRequestApprovalRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EvaluatePullRequestApprovalRulesInput"}, + "output":{"shape":"EvaluatePullRequestApprovalRulesOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + 
{"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"RevisionNotCurrentException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetApprovalRuleTemplate":{ + "name":"GetApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApprovalRuleTemplateInput"}, + "output":{"shape":"GetApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"} + ] + }, "GetBlob":{ "name":"GetBlob", "http":{ @@ -725,6 +927,48 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "GetPullRequestApprovalStates":{ + "name":"GetPullRequestApprovalStates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPullRequestApprovalStatesInput"}, + "output":{"shape":"GetPullRequestApprovalStatesOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetPullRequestOverrideState":{ + "name":"GetPullRequestOverrideState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPullRequestOverrideStateInput"}, + "output":{"shape":"GetPullRequestOverrideStateOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + 
{"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "GetRepository":{ "name":"GetRepository", "http":{ @@ -763,6 +1007,40 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "ListApprovalRuleTemplates":{ + "name":"ListApprovalRuleTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApprovalRuleTemplatesInput"}, + "output":{"shape":"ListApprovalRuleTemplatesOutput"}, + "errors":[ + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"} + ] + }, + "ListAssociatedApprovalRuleTemplatesForRepository":{ + "name":"ListAssociatedApprovalRuleTemplatesForRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssociatedApprovalRuleTemplatesForRepositoryInput"}, + "output":{"shape":"ListAssociatedApprovalRuleTemplatesForRepositoryOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "ListBranches":{ "name":"ListBranches", "http":{ @@ -821,6 +1099,27 @@ {"shape":"InvalidContinuationTokenException"} ] }, + "ListRepositoriesForApprovalRuleTemplate":{ + "name":"ListRepositoriesForApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ListRepositoriesForApprovalRuleTemplateInput"}, + "output":{"shape":"ListRepositoriesForApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -988,6 +1287,7 @@ {"shape":"InvalidRepositoryNameException"}, {"shape":"RepositoryDoesNotExistException"}, {"shape":"ConcurrentReferenceUpdateException"}, + {"shape":"PullRequestApprovalRulesNotSatisfiedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1036,6 +1336,7 @@ {"shape":"InvalidRepositoryNameException"}, {"shape":"RepositoryDoesNotExistException"}, {"shape":"RepositoryNotAssociatedWithPullRequestException"}, + {"shape":"PullRequestApprovalRulesNotSatisfiedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1084,6 +1385,32 @@ {"shape":"RepositoryDoesNotExistException"}, {"shape":"RepositoryNotAssociatedWithPullRequestException"}, {"shape":"ConcurrentReferenceUpdateException"}, + {"shape":"PullRequestApprovalRulesNotSatisfiedException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + 
"OverridePullRequestApprovalRules":{ + "name":"OverridePullRequestApprovalRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"OverridePullRequestApprovalRulesInput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"InvalidOverrideStatusException"}, + {"shape":"OverrideStatusRequiredException"}, + {"shape":"OverrideAlreadySetException"}, + {"shape":"RevisionNotCurrentException"}, + {"shape":"PullRequestAlreadyClosedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1329,6 +1656,53 @@ {"shape":"TagPolicyException"} ] }, + "UpdateApprovalRuleTemplateContent":{ + "name":"UpdateApprovalRuleTemplateContent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApprovalRuleTemplateContentInput"}, + "output":{"shape":"UpdateApprovalRuleTemplateContentOutput"}, + "errors":[ + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"InvalidApprovalRuleTemplateContentException"}, + {"shape":"InvalidRuleContentSha256Exception"}, + {"shape":"ApprovalRuleTemplateContentRequiredException"} + ] + }, + "UpdateApprovalRuleTemplateDescription":{ + "name":"UpdateApprovalRuleTemplateDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApprovalRuleTemplateDescriptionInput"}, + "output":{"shape":"UpdateApprovalRuleTemplateDescriptionOutput"}, + "errors":[ + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + 
{"shape":"InvalidApprovalRuleTemplateDescriptionException"} + ] + }, + "UpdateApprovalRuleTemplateName":{ + "name":"UpdateApprovalRuleTemplateName", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApprovalRuleTemplateNameInput"}, + "output":{"shape":"UpdateApprovalRuleTemplateNameOutput"}, + "errors":[ + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"ApprovalRuleTemplateNameAlreadyExistsException"} + ] + }, "UpdateComment":{ "name":"UpdateComment", "http":{ @@ -1368,6 +1742,59 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "UpdatePullRequestApprovalRuleContent":{ + "name":"UpdatePullRequestApprovalRuleContent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePullRequestApprovalRuleContentInput"}, + "output":{"shape":"UpdatePullRequestApprovalRuleContentOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"ApprovalRuleNameRequiredException"}, + {"shape":"InvalidApprovalRuleNameException"}, + {"shape":"ApprovalRuleDoesNotExistException"}, + {"shape":"InvalidRuleContentSha256Exception"}, + {"shape":"ApprovalRuleContentRequiredException"}, + {"shape":"InvalidApprovalRuleContentException"}, + {"shape":"CannotModifyApprovalRuleFromTemplateException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "UpdatePullRequestApprovalState":{ + "name":"UpdatePullRequestApprovalState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePullRequestApprovalStateInput"}, + "errors":[ 
+ {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"InvalidApprovalStateException"}, + {"shape":"ApprovalStateRequiredException"}, + {"shape":"PullRequestCannotBeApprovedByAuthorException"}, + {"shape":"RevisionNotCurrentException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"MaximumNumberOfApprovalsExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "UpdatePullRequestDescription":{ "name":"UpdatePullRequestDescription", "http":{ @@ -1466,13 +1893,228 @@ "exception":true }, "AdditionalData":{"type":"string"}, + "Approval":{ + "type":"structure", + "members":{ + "userArn":{"shape":"Arn"}, + "approvalState":{"shape":"ApprovalState"} + } + }, + "ApprovalList":{ + "type":"list", + "member":{"shape":"Approval"} + }, + "ApprovalRule":{ + "type":"structure", + "members":{ + "approvalRuleId":{"shape":"ApprovalRuleId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "approvalRuleContent":{"shape":"ApprovalRuleContent"}, + "ruleContentSha256":{"shape":"RuleContentSha256"}, + "lastModifiedDate":{"shape":"LastModifiedDate"}, + "creationDate":{"shape":"CreationDate"}, + "lastModifiedUser":{"shape":"Arn"}, + "originApprovalRuleTemplate":{"shape":"OriginApprovalRuleTemplate"} + } + }, + "ApprovalRuleContent":{ + "type":"string", + "max":3000, + "min":1 + }, + "ApprovalRuleContentRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleEventMetadata":{ + "type":"structure", + "members":{ + 
"approvalRuleName":{"shape":"ApprovalRuleName"}, + "approvalRuleId":{"shape":"ApprovalRuleId"}, + "approvalRuleContent":{"shape":"ApprovalRuleContent"} + } + }, + "ApprovalRuleId":{"type":"string"}, + "ApprovalRuleName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApprovalRuleNameAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleOverriddenEventMetadata":{ + "type":"structure", + "members":{ + "revisionId":{"shape":"RevisionId"}, + "overrideStatus":{"shape":"OverrideStatus"} + } + }, + "ApprovalRuleTemplate":{ + "type":"structure", + "members":{ + "approvalRuleTemplateId":{"shape":"ApprovalRuleTemplateId"}, + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "approvalRuleTemplateDescription":{"shape":"ApprovalRuleTemplateDescription"}, + "approvalRuleTemplateContent":{"shape":"ApprovalRuleTemplateContent"}, + "ruleContentSha256":{"shape":"RuleContentSha256"}, + "lastModifiedDate":{"shape":"LastModifiedDate"}, + "creationDate":{"shape":"CreationDate"}, + "lastModifiedUser":{"shape":"Arn"} + } + }, + "ApprovalRuleTemplateContent":{ + "type":"string", + "max":3000, + "min":1 + }, + "ApprovalRuleTemplateContentRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateDescription":{ + "type":"string", + "max":1000, + "min":0 + }, + "ApprovalRuleTemplateDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateId":{"type":"string"}, + "ApprovalRuleTemplateInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApprovalRuleTemplateNameAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateNameList":{ + "type":"list", + 
"member":{"shape":"ApprovalRuleTemplateName"} + }, + "ApprovalRuleTemplateNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRulesList":{ + "type":"list", + "member":{"shape":"ApprovalRule"} + }, + "ApprovalRulesNotSatisfiedList":{ + "type":"list", + "member":{"shape":"ApprovalRuleName"} + }, + "ApprovalRulesSatisfiedList":{ + "type":"list", + "member":{"shape":"ApprovalRuleName"} + }, + "ApprovalState":{ + "type":"string", + "enum":[ + "APPROVE", + "REVOKE" + ] + }, + "ApprovalStateChangedEventMetadata":{ + "type":"structure", + "members":{ + "revisionId":{"shape":"RevisionId"}, + "approvalStatus":{"shape":"ApprovalState"} + } + }, + "ApprovalStateRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Approved":{"type":"boolean"}, "Arn":{"type":"string"}, + "AssociateApprovalRuleTemplateWithRepositoryInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryName" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, "AuthorDoesNotExistException":{ "type":"structure", "members":{ }, "exception":true }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesError":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "errorCode":{"shape":"ErrorCode"}, + "errorMessage":{"shape":"ErrorMessage"} + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList":{ + "type":"list", + "member":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesError"} + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryNames" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryNames":{"shape":"RepositoryNameList"} + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput":{ + 
"type":"structure", + "required":[ + "associatedRepositoryNames", + "errors" + ], + "members":{ + "associatedRepositoryNames":{"shape":"RepositoryNameList"}, + "errors":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList"} + } + }, "BatchDescribeMergeConflictsError":{ "type":"structure", "required":[ @@ -1527,6 +2169,40 @@ "baseCommitId":{"shape":"ObjectId"} } }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "errorCode":{"shape":"ErrorCode"}, + "errorMessage":{"shape":"ErrorMessage"} + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList":{ + "type":"list", + "member":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesError"} + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryNames" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryNames":{"shape":"RepositoryNameList"} + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput":{ + "type":"structure", + "required":[ + "disassociatedRepositoryNames", + "errors" + ], + "members":{ + "disassociatedRepositoryNames":{"shape":"RepositoryNameList"}, + "errors":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList"} + } + }, "BatchGetCommitsError":{ "type":"structure", "members":{ @@ -1637,6 +2313,18 @@ }, "exception":true }, + "CannotDeleteApprovalRuleFromTemplateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CannotModifyApprovalRuleFromTemplateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "CapitalBoolean":{"type":"boolean"}, "ChangeTypeEnum":{ "type":"string", @@ -1867,6 +2555,25 @@ "member":{"shape":"Conflict"} }, "Content":{"type":"string"}, + "CreateApprovalRuleTemplateInput":{ + "type":"structure", + "required":[ + 
"approvalRuleTemplateName", + "approvalRuleTemplateContent" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "approvalRuleTemplateContent":{"shape":"ApprovalRuleTemplateContent"}, + "approvalRuleTemplateDescription":{"shape":"ApprovalRuleTemplateDescription"} + } + }, + "CreateApprovalRuleTemplateOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, "CreateBranchInput":{ "type":"structure", "required":[ @@ -1909,6 +2616,26 @@ "filesDeleted":{"shape":"FilesMetadata"} } }, + "CreatePullRequestApprovalRuleInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "approvalRuleName", + "approvalRuleContent" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "approvalRuleContent":{"shape":"ApprovalRuleContent"} + } + }, + "CreatePullRequestApprovalRuleOutput":{ + "type":"structure", + "required":["approvalRule"], + "members":{ + "approvalRule":{"shape":"ApprovalRule"} + } + }, "CreatePullRequestInput":{ "type":"structure", "required":[ @@ -1984,6 +2711,20 @@ }, "exception":true }, + "DeleteApprovalRuleTemplateInput":{ + "type":"structure", + "required":["approvalRuleTemplateName"], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "DeleteApprovalRuleTemplateOutput":{ + "type":"structure", + "required":["approvalRuleTemplateId"], + "members":{ + "approvalRuleTemplateId":{"shape":"ApprovalRuleTemplateId"} + } + }, "DeleteBranchInput":{ "type":"structure", "required":[ @@ -2059,6 +2800,24 @@ "filePath":{"shape":"Path"} } }, + "DeletePullRequestApprovalRuleInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "approvalRuleName" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"} + } + }, + "DeletePullRequestApprovalRuleOutput":{ + "type":"structure", + 
"required":["approvalRuleId"], + "members":{ + "approvalRuleId":{"shape":"ApprovalRuleId"} + } + }, "DeleteRepositoryInput":{ "type":"structure", "required":["repositoryName"], @@ -2151,6 +2910,17 @@ }, "exception":true }, + "DisassociateApprovalRuleTemplateFromRepositoryInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryName" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, "Email":{"type":"string"}, "EncryptionIntegrityChecksFailedException":{ "type":"structure", @@ -2185,6 +2955,33 @@ }, "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, + "EvaluatePullRequestApprovalRulesInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"} + } + }, + "EvaluatePullRequestApprovalRulesOutput":{ + "type":"structure", + "required":["evaluation"], + "members":{ + "evaluation":{"shape":"Evaluation"} + } + }, + "Evaluation":{ + "type":"structure", + "members":{ + "approved":{"shape":"Approved"}, + "overridden":{"shape":"Overridden"}, + "approvalRulesSatisfied":{"shape":"ApprovalRulesSatisfiedList"}, + "approvalRulesNotSatisfied":{"shape":"ApprovalRulesNotSatisfiedList"} + } + }, "EventDate":{"type":"timestamp"}, "ExceptionName":{"type":"string"}, "File":{ @@ -2323,6 +3120,20 @@ "type":"list", "member":{"shape":"Folder"} }, + "GetApprovalRuleTemplateInput":{ + "type":"structure", + "required":["approvalRuleTemplateName"], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "GetApprovalRuleTemplateOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, "GetBlobInput":{ "type":"structure", "required":[ @@ -2597,6 +3408,23 @@ "baseCommitId":{"shape":"ObjectId"} } }, + 
"GetPullRequestApprovalStatesInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"} + } + }, + "GetPullRequestApprovalStatesOutput":{ + "type":"structure", + "members":{ + "approvals":{"shape":"ApprovalList"} + } + }, "GetPullRequestInput":{ "type":"structure", "required":["pullRequestId"], @@ -2611,6 +3439,24 @@ "pullRequest":{"shape":"PullRequest"} } }, + "GetPullRequestOverrideStateInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"} + } + }, + "GetPullRequestOverrideStateOutput":{ + "type":"structure", + "members":{ + "overridden":{"shape":"Overridden"}, + "overrider":{"shape":"Arn"} + } + }, "GetRepositoryInput":{ "type":"structure", "required":["repositoryName"], @@ -2651,6 +3497,42 @@ }, "exception":true }, + "InvalidApprovalRuleContentException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleTemplateContentException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleTemplateDescriptionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleTemplateNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalStateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidAuthorArnException":{ "type":"structure", "members":{ @@ -2789,6 +3671,12 @@ }, "exception":true }, + "InvalidOverrideStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidParentCommitIdException":{ "type":"structure", "members":{ @@ -2903,6 +3791,18 @@ }, "exception":true }, + "InvalidRevisionIdException":{ + 
"type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRuleContentSha256Exception":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidSortByException":{ "type":"structure", "members":{ @@ -2980,6 +3880,36 @@ "box":true }, "LineNumber":{"type":"integer"}, + "ListApprovalRuleTemplatesInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListApprovalRuleTemplatesOutput":{ + "type":"structure", + "members":{ + "approvalRuleTemplateNames":{"shape":"ApprovalRuleTemplateNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput":{ + "type":"structure", + "members":{ + "approvalRuleTemplateNames":{"shape":"ApprovalRuleTemplateNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListBranchesInput":{ "type":"structure", "required":["repositoryName"], @@ -3014,6 +3944,22 @@ "nextToken":{"shape":"NextToken"} } }, + "ListRepositoriesForApprovalRuleTemplateInput":{ + "type":"structure", + "required":["approvalRuleTemplateName"], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListRepositoriesForApprovalRuleTemplateOutput":{ + "type":"structure", + "members":{ + "repositoryNames":{"shape":"RepositoryNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListRepositoriesInput":{ "type":"structure", "members":{ @@ -3089,6 +4035,12 @@ }, "exception":true }, + "MaximumNumberOfApprovalsExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MaximumOpenPullRequestsExceededException":{ 
"type":"structure", "members":{ @@ -3107,6 +4059,12 @@ }, "exception":true }, + "MaximumRuleTemplatesAssociatedWithRepositoryException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MergeBranchesByFastForwardInput":{ "type":"structure", "required":[ @@ -3336,6 +4294,18 @@ "exception":true }, "NumberOfConflicts":{"type":"integer"}, + "NumberOfRuleTemplatesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NumberOfRulesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "ObjectId":{"type":"string"}, "ObjectSize":{"type":"long"}, "ObjectTypeEnum":{ @@ -3362,6 +4332,46 @@ "descending" ] }, + "OriginApprovalRuleTemplate":{ + "type":"structure", + "members":{ + "approvalRuleTemplateId":{"shape":"ApprovalRuleTemplateId"}, + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "Overridden":{"type":"boolean"}, + "OverrideAlreadySetException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OverridePullRequestApprovalRulesInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId", + "overrideStatus" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"}, + "overrideStatus":{"shape":"OverrideStatus"} + } + }, + "OverrideStatus":{ + "type":"string", + "enum":[ + "OVERRIDE", + "REVOKE" + ] + }, + "OverrideStatusRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "ParentCommitDoesNotExistException":{ "type":"structure", "members":{ @@ -3496,7 +4506,9 @@ "pullRequestStatus":{"shape":"PullRequestStatusEnum"}, "authorArn":{"shape":"Arn"}, "pullRequestTargets":{"shape":"PullRequestTargetList"}, - "clientRequestToken":{"shape":"ClientRequestToken"} + "clientRequestToken":{"shape":"ClientRequestToken"}, + "revisionId":{"shape":"RevisionId"}, + "approvalRules":{"shape":"ApprovalRulesList"} } }, "PullRequestAlreadyClosedException":{ @@ -3505,6 +4517,18 
@@ }, "exception":true }, + "PullRequestApprovalRulesNotSatisfiedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PullRequestCannotBeApprovedByAuthorException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "PullRequestCreatedEventMetadata":{ "type":"structure", "members":{ @@ -3530,7 +4554,10 @@ "pullRequestCreatedEventMetadata":{"shape":"PullRequestCreatedEventMetadata"}, "pullRequestStatusChangedEventMetadata":{"shape":"PullRequestStatusChangedEventMetadata"}, "pullRequestSourceReferenceUpdatedEventMetadata":{"shape":"PullRequestSourceReferenceUpdatedEventMetadata"}, - "pullRequestMergedStateChangedEventMetadata":{"shape":"PullRequestMergedStateChangedEventMetadata"} + "pullRequestMergedStateChangedEventMetadata":{"shape":"PullRequestMergedStateChangedEventMetadata"}, + "approvalRuleEventMetadata":{"shape":"ApprovalRuleEventMetadata"}, + "approvalStateChangedEventMetadata":{"shape":"ApprovalStateChangedEventMetadata"}, + "approvalRuleOverriddenEventMetadata":{"shape":"ApprovalRuleOverriddenEventMetadata"} } }, "PullRequestEventList":{ @@ -3543,7 +4570,12 @@ "PULL_REQUEST_CREATED", "PULL_REQUEST_STATUS_CHANGED", "PULL_REQUEST_SOURCE_REFERENCE_UPDATED", - "PULL_REQUEST_MERGE_STATE_CHANGED" + "PULL_REQUEST_MERGE_STATE_CHANGED", + "PULL_REQUEST_APPROVAL_RULE_CREATED", + "PULL_REQUEST_APPROVAL_RULE_UPDATED", + "PULL_REQUEST_APPROVAL_RULE_DELETED", + "PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN", + "PULL_REQUEST_APPROVAL_STATE_CHANGED" ] }, "PullRequestId":{"type":"string"}, @@ -3922,6 +4954,20 @@ }, "exception":true }, + "RevisionId":{"type":"string"}, + "RevisionIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RevisionNotCurrentException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RuleContentSha256":{"type":"string"}, "SameFileContentException":{ "type":"structure", "members":{ @@ -4134,6 +5180,61 @@ "tagKeys":{"shape":"TagKeysList"} } }, + 
"UpdateApprovalRuleTemplateContentInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "newRuleContent" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "newRuleContent":{"shape":"ApprovalRuleTemplateContent"}, + "existingRuleContentSha256":{"shape":"RuleContentSha256"} + } + }, + "UpdateApprovalRuleTemplateContentOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, + "UpdateApprovalRuleTemplateDescriptionInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "approvalRuleTemplateDescription" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "approvalRuleTemplateDescription":{"shape":"ApprovalRuleTemplateDescription"} + } + }, + "UpdateApprovalRuleTemplateDescriptionOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, + "UpdateApprovalRuleTemplateNameInput":{ + "type":"structure", + "required":[ + "oldApprovalRuleTemplateName", + "newApprovalRuleTemplateName" + ], + "members":{ + "oldApprovalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "newApprovalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "UpdateApprovalRuleTemplateNameOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, "UpdateCommentInput":{ "type":"structure", "required":[ @@ -4162,6 +5263,40 @@ "defaultBranchName":{"shape":"BranchName"} } }, + "UpdatePullRequestApprovalRuleContentInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "approvalRuleName", + "newRuleContent" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "existingRuleContentSha256":{"shape":"RuleContentSha256"}, + 
"newRuleContent":{"shape":"ApprovalRuleContent"} + } + }, + "UpdatePullRequestApprovalRuleContentOutput":{ + "type":"structure", + "required":["approvalRule"], + "members":{ + "approvalRule":{"shape":"ApprovalRule"} + } + }, + "UpdatePullRequestApprovalStateInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId", + "approvalState" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"}, + "approvalState":{"shape":"ApprovalState"} + } + }, "UpdatePullRequestDescriptionInput":{ "type":"structure", "required":[ diff --git a/models/apis/codecommit/2015-04-13/docs-2.json b/models/apis/codecommit/2015-04-13/docs-2.json index 16dedd9bad7..04af554911e 100644 --- a/models/apis/codecommit/2015-04-13/docs-2.json +++ b/models/apis/codecommit/2015-04-13/docs-2.json @@ -1,61 +1,82 @@ { "version": "2.0", - "service": "This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.
You can use the AWS CodeCommit API to work with the following objects:
Repositories, by calling the following:
BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.
CreateRepository, which creates an AWS CodeCommit repository.
DeleteRepository, which deletes an AWS CodeCommit repository.
GetRepository, which returns information about a specified repository.
ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.
UpdateRepositoryDescription, which sets or updates the description of the repository.
UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.
Branches, by calling the following:
CreateBranch, which creates a new branch in a specified repository.
DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.
GetBranch, which returns information about a specified branch.
ListBranches, which lists all branches for a specified repository.
UpdateDefaultBranch, which changes the default branch for a repository.
Files, by calling the following:
DeleteFile, which deletes the content of a specified file from a specified branch.
GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.
GetFile, which returns the base-64 encoded content of a specified file.
GetFolder, which returns the contents of a specified folder or directory.
PutFile, which adds or modifies a single file in a specified repository and branch.
Commits, by calling the following:
BatchGetCommits, which returns information about one or more commits in a repository
CreateCommit, which creates a commit for changes to a repository.
GetCommit, which returns information about a commit, including commit messages and author and committer information.
GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).
Merges, by calling the following:
BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.
CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.
DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.
GetMergeCommit, which returns information about the merge between a source and destination commit.
GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.
GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.
MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.
MergeBranchesBySquash, which merges two branches using the squash merge option.
MergeBranchesByThreeWay, which merges two branches using the three-way merge option.
Pull requests, by calling the following:
CreatePullRequest, which creates a pull request in a specified repository.
DescribePullRequestEvents, which returns information about one or more pull request events.
GetCommentsForPullRequest, which returns information about comments on a specified pull request.
GetPullRequest, which returns information about a specified pull request.
ListPullRequests, which lists all pull requests for a repository.
MergePullRequestByFastForward, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the fast-forward merge option.
MergePullRequestBySquash, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the squash merge option.
MergePullRequestByThreeWay. which merges the source destination branch of a pull request into the specified destination branch for that pull request using the three-way merge option.
PostCommentForPullRequest, which posts a comment to a pull request at the specified line, file, or request.
UpdatePullRequestDescription, which updates the description of a pull request.
UpdatePullRequestStatus, which updates the status of a pull request.
UpdatePullRequestTitle, which updates the title of a pull request.
Comments in a repository, by calling the following:
DeleteCommentContent, which deletes the content of a comment on a commit in a repository.
GetComment, which returns information about a comment on a commit.
GetCommentsForComparedCommit, which returns information about comments on the comparison between two commit specifiers in a repository.
PostCommentForComparedCommit, which creates a comment on the comparison between two commit specifiers in a repository.
PostCommentReply, which creates a reply to a comment.
UpdateComment, which updates the content of a comment on a commit in a repository.
Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:
ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.
TagResource, which adds or updates tags for a resource in AWS CodeCommit.
UntagResource, which removes tags for a resource in AWS CodeCommit.
Triggers, by calling the following:
GetRepositoryTriggers, which returns information about triggers configured for a repository.
PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.
TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.
For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.
", + "service": "This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.
You can use the AWS CodeCommit API to work with the following objects:
Repositories, by calling the following:
BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.
CreateRepository, which creates an AWS CodeCommit repository.
DeleteRepository, which deletes an AWS CodeCommit repository.
GetRepository, which returns information about a specified repository.
ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.
UpdateRepositoryDescription, which sets or updates the description of the repository.
UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.
Branches, by calling the following:
CreateBranch, which creates a branch in a specified repository.
DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.
GetBranch, which returns information about a specified branch.
ListBranches, which lists all branches for a specified repository.
UpdateDefaultBranch, which changes the default branch for a repository.
Files, by calling the following:
DeleteFile, which deletes the content of a specified file from a specified branch.
GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.
GetFile, which returns the base-64 encoded content of a specified file.
GetFolder, which returns the contents of a specified folder or directory.
PutFile, which adds or modifies a single file in a specified repository and branch.
Commits, by calling the following:
BatchGetCommits, which returns information about one or more commits in a repository.
CreateCommit, which creates a commit for changes to a repository.
GetCommit, which returns information about a commit, including commit messages and author and committer information.
GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).
Merges, by calling the following:
BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.
CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.
DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.
GetMergeCommit, which returns information about the merge between a source and destination commit.
GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.
GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.
MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.
MergeBranchesBySquash, which merges two branches using the squash merge option.
MergeBranchesByThreeWay, which merges two branches using the three-way merge option.
Pull requests, by calling the following:
CreatePullRequest, which creates a pull request in a specified repository.
CreatePullRequestApprovalRule, which creates an approval rule for a specified pull request.
DeletePullRequestApprovalRule, which deletes an approval rule for a specified pull request.
DescribePullRequestEvents, which returns information about one or more pull request events.
EvaluatePullRequestApprovalRules, which evaluates whether a pull request has met all the conditions specified in its associated approval rules.
GetCommentsForPullRequest, which returns information about comments on a specified pull request.
GetPullRequest, which returns information about a specified pull request.
GetPullRequestApprovalStates, which returns information about the approval states for a specified pull request.
GetPullRequestOverrideState, which returns information about whether approval rules have been set aside (overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request.&lt;/p&gt; &lt;/li&gt; &lt;li&gt; &lt;p&gt;
ListPullRequests, which lists all pull requests for a repository.
MergePullRequestByFastForward, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the fast-forward merge option.
MergePullRequestBySquash, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the squash merge option.
MergePullRequestByThreeWay, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the three-way merge option.&lt;/p&gt; &lt;/li&gt; &lt;li&gt; &lt;p&gt;
OverridePullRequestApprovalRules, which sets aside all approval rule requirements for a pull request.
PostCommentForPullRequest, which posts a comment to a pull request at the specified line, file, or request.
UpdatePullRequestApprovalRuleContent, which updates the structure of an approval rule for a pull request.
UpdatePullRequestApprovalState, which updates the state of an approval on a pull request.
UpdatePullRequestDescription, which updates the description of a pull request.
UpdatePullRequestStatus, which updates the status of a pull request.
UpdatePullRequestTitle, which updates the title of a pull request.
Approval rule templates, by calling the following:
AssociateApprovalRuleTemplateWithRepository, which associates a template with a specified repository. After the template is associated with a repository, AWS CodeCommit creates approval rules that match the template conditions on every pull request created in the specified repository.
BatchAssociateApprovalRuleTemplateWithRepositories, which associates a template with one or more specified repositories. After the template is associated with a repository, AWS CodeCommit creates approval rules that match the template conditions on every pull request created in the specified repositories.
BatchDisassociateApprovalRuleTemplateFromRepositories, which removes the association between a template and specified repositories so that approval rules based on the template are not automatically created when pull requests are created in those repositories.
CreateApprovalRuleTemplate, which creates a template for approval rules that can then be associated with one or more repositories in your AWS account.
DeleteApprovalRuleTemplate, which deletes the specified template. It does not remove approval rules on pull requests already created with the template.
DisassociateApprovalRuleTemplateFromRepository, which removes the association between a template and a repository so that approval rules based on the template are not automatically created when pull requests are created in the specified repository.
GetApprovalRuleTemplate, which returns information about an approval rule template.
ListApprovalRuleTemplates, which lists all approval rule templates in the AWS Region in your AWS account.
ListAssociatedApprovalRuleTemplatesForRepository, which lists all approval rule templates that are associated with a specified repository.
ListRepositoriesForApprovalRuleTemplate, which lists all repositories associated with the specified approval rule template.
UpdateApprovalRuleTemplateDescription, which updates the description of an approval rule template.
UpdateApprovalRuleTemplateName, which updates the name of an approval rule template.
UpdateApprovalRuleTemplateContent, which updates the content of an approval rule template.
Comments in a repository, by calling the following:
DeleteCommentContent, which deletes the content of a comment on a commit in a repository.
GetComment, which returns information about a comment on a commit.
GetCommentsForComparedCommit, which returns information about comments on the comparison between two commit specifiers in a repository.
PostCommentForComparedCommit, which creates a comment on the comparison between two commit specifiers in a repository.
PostCommentReply, which creates a reply to a comment.
UpdateComment, which updates the content of a comment on a commit in a repository.
Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:
ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.
TagResource, which adds or updates tags for a resource in AWS CodeCommit.
UntagResource, which removes tags for a resource in AWS CodeCommit.
Triggers, by calling the following:
GetRepositoryTriggers, which returns information about triggers configured for a repository.
PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.
TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.
For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.
", "operations": { + "AssociateApprovalRuleTemplateWithRepository": "Creates an association between an approval rule template and a specified repository. Then, the next time a pull request is created in the repository where the destination reference (if specified) matches the destination reference (branch) for the pull request, an approval rule that matches the template conditions is automatically created for that pull request. If no destination references are specified in the template, an approval rule that matches the template contents is created for all pull requests in that repository.
", + "BatchAssociateApprovalRuleTemplateWithRepositories": "Creates an association between an approval rule template and one or more specified repositories.
", "BatchDescribeMergeConflicts": "Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy.
", + "BatchDisassociateApprovalRuleTemplateFromRepositories": "Removes the association between an approval rule template and one or more specified repositories.
", "BatchGetCommits": "Returns information about the contents of one or more commits in a repository.
", - "BatchGetRepositories": "Returns information about one or more repositories.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.
Creates a new branch in a repository and points the branch to a commit.
Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.
Returns information about one or more repositories.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.
Creates a template for approval rules that can then be associated with one or more repositories in your AWS account. When you associate a template with a repository, AWS CodeCommit creates an approval rule that matches the conditions of the template for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository.
", + "CreateBranch": "Creates a branch in a repository and points the branch to a commit.
Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.
Creates a commit for a repository on the tip of a specified branch.
", "CreatePullRequest": "Creates a pull request in the specified repository.
", + "CreatePullRequestApprovalRule": "Creates an approval rule for a pull request.
", "CreateRepository": "Creates a new, empty repository.
", - "CreateUnreferencedMergeCommit": "Creates an unreferenced commit that represents the result of merging two branches using a specified merge strategy. This can help you determine the outcome of a potential merge. This API cannot be used with the fast-forward merge strategy, as that strategy does not create a merge commit.
This unreferenced merge commit can only be accessed using the GetCommit API or through git commands such as git fetch. To retrieve this commit, you must specify its commit ID or otherwise reference it.
Creates an unreferenced commit that represents the result of merging two branches using a specified merge strategy. This can help you determine the outcome of a potential merge. This API cannot be used with the fast-forward merge strategy because that strategy does not create a merge commit.
This unreferenced merge commit can only be accessed using the GetCommit API or through git commands such as git fetch. To retrieve this commit, you must specify its commit ID or otherwise reference it.
Deletes a specified approval rule template. Deleting a template does not remove approval rules on pull requests already created with the template.
", "DeleteBranch": "Deletes a branch from a repository, unless that branch is the default branch for the repository.
", "DeleteCommentContent": "Deletes the content of a comment made on a change, file, or commit in a repository.
", - "DeleteFile": "Deletes a specified file from a specified branch. A commit is created on the branch that contains the revision. The file will still exist in the commits prior to the commit that contains the deletion.
", - "DeleteRepository": "Deletes a repository. If a specified repository was already deleted, a null repository ID will be returned.
Deleting a repository also deletes all associated objects and metadata. After a repository is deleted, all future push calls to the deleted repository will fail.
Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy. If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, an exception will be thrown.
", + "DeleteFile": "Deletes a specified file from a specified branch. A commit is created on the branch that contains the revision. The file still exists in the commits earlier to the commit that contains the deletion.
", + "DeletePullRequestApprovalRule": "Deletes an approval rule from a specified pull request. Approval rules can be deleted from a pull request only if the pull request is open, and if the approval rule was created specifically for a pull request and not generated from an approval rule template associated with the repository where the pull request was created. You cannot delete an approval rule from a merged or closed pull request.
", + "DeleteRepository": "Deletes a repository. If a specified repository was already deleted, a null repository ID is returned.
Deleting a repository also deletes all associated objects and metadata. After a repository is deleted, all future push calls to the deleted repository fail.
Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy. If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, an exception is thrown.
", "DescribePullRequestEvents": "Returns information about one or more pull request events.
", - "GetBlob": "Returns the base-64 encoded content of an individual blob within a repository.
", + "DisassociateApprovalRuleTemplateFromRepository": "Removes the association between a template and a repository so that approval rules based on the template are not automatically created when pull requests are created in the specified repository. This does not delete any approval rules previously created for pull requests through the template association.
", + "EvaluatePullRequestApprovalRules": "Evaluates whether a pull request has met all the conditions specified in its associated approval rules.
", + "GetApprovalRuleTemplate": "Returns information about a specified approval rule template.
", + "GetBlob": "Returns the base-64 encoded content of an individual blob in a repository.
", "GetBranch": "Returns information about a repository branch, including its name and the last commit ID.
", "GetComment": "Returns the content of a comment made on a change, file, or commit in a repository.
", "GetCommentsForComparedCommit": "Returns information about comments made on the comparison between two commits.
", "GetCommentsForPullRequest": "Returns comments made on a pull request.
", "GetCommit": "Returns information about a commit, including commit message and committer information.
", - "GetDifferences": "Returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference). Results can be limited to a specified path.
", + "GetDifferences": "Returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference). Results can be limited to a specified path.
", "GetFile": "Returns the base-64 encoded contents of a specified file and its metadata.
", "GetFolder": "Returns the contents of a specified folder in a repository.
", "GetMergeCommit": "Returns information about a specified merge commit.
", "GetMergeConflicts": "Returns information about merge conflicts between the before and after commit IDs for a pull request in a repository.
", - "GetMergeOptions": "Returns information about the merge options available for merging two specified branches. For details about why a particular merge option is not available, use GetMergeConflicts or DescribeMergeConflicts.
", + "GetMergeOptions": "Returns information about the merge options available for merging two specified branches. For details about why a merge option is not available, use GetMergeConflicts or DescribeMergeConflicts.
", "GetPullRequest": "Gets information about a pull request in a specified repository.
", - "GetRepository": "Returns information about a repository.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.
Gets information about the approval states for a specified pull request. Approval states only apply to pull requests that have one or more approval rules applied to them.
", + "GetPullRequestOverrideState": "Returns information about whether approval rules have been set aside (overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request.
", + "GetRepository": "Returns information about a repository.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.
Gets information about triggers configured for a repository.
", + "ListApprovalRuleTemplates": "Lists all approval rule templates in the specified AWS Region in your AWS account. If an AWS Region is not specified, the AWS Region where you are signed in is used.
", + "ListAssociatedApprovalRuleTemplatesForRepository": "Lists all approval rule templates that are associated with a specified repository.
", "ListBranches": "Gets information about one or more branches in a repository.
", "ListPullRequests": "Returns a list of pull requests for a specified repository. The return list can be refined by pull request status or pull request author ARN.
", "ListRepositories": "Gets information about one or more repositories.
", - "ListTagsForResource": "Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.
", + "ListRepositoriesForApprovalRuleTemplate": "Lists all repositories associated with the specified approval rule template.
", + "ListTagsForResource": "Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.
", "MergeBranchesByFastForward": "Merges two branches using the fast-forward merge strategy.
", "MergeBranchesBySquash": "Merges two branches using the squash merge strategy.
", "MergeBranchesByThreeWay": "Merges two specified branches using the three-way merge strategy.
", "MergePullRequestByFastForward": "Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge strategy. If the merge is successful, it closes the pull request.
", "MergePullRequestBySquash": "Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the squash merge strategy. If the merge is successful, it closes the pull request.
", "MergePullRequestByThreeWay": "Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the three-way merge strategy. If the merge is successful, it closes the pull request.
", + "OverridePullRequestApprovalRules": "Sets aside (overrides) all approval rule requirements for a specified pull request.
", "PostCommentForComparedCommit": "Posts a comment on the comparison between two commits.
", "PostCommentForPullRequest": "Posts a comment on a pull request.
", "PostCommentReply": "Posts a comment in reply to an existing comment on a comparison between commits or a pull request.
", "PutFile": "Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.
", - "PutRepositoryTriggers": "Replaces all triggers for a repository. This can be used to create or delete triggers.
", - "TagResource": "Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.
", - "TestRepositoryTriggers": "Tests the functionality of repository triggers by sending information to the trigger target. If real data is available in the repository, the test will send data from the last commit. If no data is available, sample data will be generated.
", - "UntagResource": "Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.
", + "PutRepositoryTriggers": "Replaces all triggers for a repository. Used to create or delete triggers.
", + "TagResource": "Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.
", + "TestRepositoryTriggers": "Tests the functionality of repository triggers by sending information to the trigger target. If real data is available in the repository, the test sends data from the last commit. If no data is available, sample data is generated.
", + "UntagResource": "Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.
", + "UpdateApprovalRuleTemplateContent": "Updates the content of an approval rule template. You can change the number of required approvals, the membership of the approval rule, and whether an approval pool is defined.
", + "UpdateApprovalRuleTemplateDescription": "Updates the description for a specified approval rule template.
", + "UpdateApprovalRuleTemplateName": "Updates the name of a specified approval rule template.
", "UpdateComment": "Replaces the contents of a comment.
", "UpdateDefaultBranch": "Sets or changes the default branch name for the specified repository.
If you use this operation to change the default branch name to the current default branch name, a success message is returned even though the default branch did not change.
Updates the structure of an approval rule created specifically for a pull request. For example, you can change the number of required approvers and the approval pool for approvers.
", + "UpdatePullRequestApprovalState": "Updates the state of a user's approval on a pull request. The user is derived from the signed-in account when the request is made.
", "UpdatePullRequestDescription": "Replaces the contents of the description of a pull request.
", "UpdatePullRequestStatus": "Updates the status of a pull request.
", "UpdatePullRequestTitle": "Replaces the title of a pull request.
", - "UpdateRepositoryDescription": "Sets or changes the comment or description for a repository.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.
Renames a repository. The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix \".git\" is prohibited. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide.
" + "UpdateRepositoryDescription": "Sets or changes the comment or description for a repository.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.
Renames a repository. The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide.
" }, "shapes": { "AccountId": { @@ -72,20 +93,239 @@ "AdditionalData": { "base": null, "refs": { - "Commit$additionalData": "Any additional data associated with the specified commit.
" + "Commit$additionalData": "Any other data associated with the specified commit.
" + } + }, + "Approval": { + "base": "Returns information about a specific approval on a pull request.
", + "refs": { + "ApprovalList$member": null + } + }, + "ApprovalList": { + "base": null, + "refs": { + "GetPullRequestApprovalStatesOutput$approvals": "Information about users who have approved the pull request.
" + } + }, + "ApprovalRule": { + "base": "Returns information about an approval rule.
", + "refs": { + "ApprovalRulesList$member": null, + "CreatePullRequestApprovalRuleOutput$approvalRule": "Information about the created approval rule.
", + "UpdatePullRequestApprovalRuleContentOutput$approvalRule": "Information about the updated approval rule.
" + } + }, + "ApprovalRuleContent": { + "base": null, + "refs": { + "ApprovalRule$approvalRuleContent": "The content of the approval rule.
", + "ApprovalRuleEventMetadata$approvalRuleContent": "The content of the approval rule.
", + "CreatePullRequestApprovalRuleInput$approvalRuleContent": "The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the AWS CodeCommit User Guide.
When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:
CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:
An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)
A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)
This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).
Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.
For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.
The updated content for the approval rule.
When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:
CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:
An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)
A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)
This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).
Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.
For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.
The content for the approval rule is empty. You must provide some content for an approval rule. The content cannot be null.
", + "refs": { + } + }, + "ApprovalRuleDoesNotExistException": { + "base": "The specified approval rule does not exist.
", + "refs": { + } + }, + "ApprovalRuleEventMetadata": { + "base": "Returns information about an event for an approval rule.
", + "refs": { + "PullRequestEvent$approvalRuleEventMetadata": "Information about a pull request event.
" + } + }, + "ApprovalRuleId": { + "base": null, + "refs": { + "ApprovalRule$approvalRuleId": "The system-generated ID of the approval rule.
", + "ApprovalRuleEventMetadata$approvalRuleId": "The system-generated ID of the approval rule.
", + "DeletePullRequestApprovalRuleOutput$approvalRuleId": "The ID of the deleted approval rule.
If the approval rule was deleted in an earlier API call, the response is 200 OK without content.
The name of the approval rule.
", + "ApprovalRuleEventMetadata$approvalRuleName": "The name of the approval rule.
", + "ApprovalRulesNotSatisfiedList$member": null, + "ApprovalRulesSatisfiedList$member": null, + "CreatePullRequestApprovalRuleInput$approvalRuleName": "The name for the approval rule.
", + "DeletePullRequestApprovalRuleInput$approvalRuleName": "The name of the approval rule you want to delete.
", + "UpdatePullRequestApprovalRuleContentInput$approvalRuleName": "The name of the approval rule you want to update.
" + } + }, + "ApprovalRuleNameAlreadyExistsException": { + "base": "An approval rule with that name already exists. Approval rule names must be unique within the scope of a pull request.
", + "refs": { + } + }, + "ApprovalRuleNameRequiredException": { + "base": "An approval rule name is required, but was not specified.
", + "refs": { + } + }, + "ApprovalRuleOverriddenEventMetadata": { + "base": "Returns information about an override event for approval rules for a pull request.
", + "refs": { + "PullRequestEvent$approvalRuleOverriddenEventMetadata": "Information about an approval rule override event for a pull request.
" + } + }, + "ApprovalRuleTemplate": { + "base": "Returns information about an approval rule template.
", + "refs": { + "CreateApprovalRuleTemplateOutput$approvalRuleTemplate": "The content and structure of the created approval rule template.
", + "GetApprovalRuleTemplateOutput$approvalRuleTemplate": "The content and structure of the approval rule template.
", + "UpdateApprovalRuleTemplateContentOutput$approvalRuleTemplate": null, + "UpdateApprovalRuleTemplateDescriptionOutput$approvalRuleTemplate": "The structure and content of the updated approval rule template.
", + "UpdateApprovalRuleTemplateNameOutput$approvalRuleTemplate": "The structure and content of the updated approval rule template.
" + } + }, + "ApprovalRuleTemplateContent": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateContent": "The content of the approval rule template.
", + "CreateApprovalRuleTemplateInput$approvalRuleTemplateContent": "The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.
When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:
CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:
An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)
A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)
This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).
Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.
For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.
The content that replaces the existing content of the rule. Content statements must be complete. You cannot provide only the changes.
" + } + }, + "ApprovalRuleTemplateContentRequiredException": { + "base": "The content for the approval rule template is empty. You must provide some content for an approval rule template. The content cannot be null.
", + "refs": { + } + }, + "ApprovalRuleTemplateDescription": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateDescription": "The description of the approval rule template.
", + "CreateApprovalRuleTemplateInput$approvalRuleTemplateDescription": "The description of the approval rule template. Consider providing a description that explains what this template does and when it might be appropriate to associate it with repositories.
", + "UpdateApprovalRuleTemplateDescriptionInput$approvalRuleTemplateDescription": "The updated description of the approval rule template.
" + } + }, + "ApprovalRuleTemplateDoesNotExistException": { + "base": "The specified approval rule template does not exist. Verify that the name is correct and that you are signed in to the AWS Region where the template was created, and then try again.
", + "refs": { + } + }, + "ApprovalRuleTemplateId": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateId": "The system-generated ID of the approval rule template.
", + "DeleteApprovalRuleTemplateOutput$approvalRuleTemplateId": "The system-generated ID of the deleted approval rule template. If the template has been previously deleted, the only response is a 200 OK.
", + "OriginApprovalRuleTemplate$approvalRuleTemplateId": "The ID of the template that created the approval rule.
" + } + }, + "ApprovalRuleTemplateInUseException": { + "base": "The approval rule template is associated with one or more repositories. You cannot delete a template that is associated with a repository. Remove all associations, and then try again.
", + "refs": { + } + }, + "ApprovalRuleTemplateName": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateName": "The name of the approval rule template.
", + "ApprovalRuleTemplateNameList$member": null, + "AssociateApprovalRuleTemplateWithRepositoryInput$approvalRuleTemplateName": "The name for the approval rule template.
", + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput$approvalRuleTemplateName": "The name of the template you want to associate with one or more repositories.
", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput$approvalRuleTemplateName": "The name of the template that you want to disassociate from one or more repositories.
", + "CreateApprovalRuleTemplateInput$approvalRuleTemplateName": "The name of the approval rule template. Provide descriptive names, because this name is applied to the approval rules created automatically in associated repositories.
", + "DeleteApprovalRuleTemplateInput$approvalRuleTemplateName": "The name of the approval rule template to delete.
", + "DisassociateApprovalRuleTemplateFromRepositoryInput$approvalRuleTemplateName": "The name of the approval rule template to disassociate from a specified repository.
", + "GetApprovalRuleTemplateInput$approvalRuleTemplateName": "The name of the approval rule template for which you want to get information.
", + "ListRepositoriesForApprovalRuleTemplateInput$approvalRuleTemplateName": "The name of the approval rule template for which you want to list repositories that are associated with that template.
", + "OriginApprovalRuleTemplate$approvalRuleTemplateName": "The name of the template that created the approval rule.
", + "UpdateApprovalRuleTemplateContentInput$approvalRuleTemplateName": "The name of the approval rule template where you want to update the content of the rule.
", + "UpdateApprovalRuleTemplateDescriptionInput$approvalRuleTemplateName": "The name of the template for which you want to update the description.
", + "UpdateApprovalRuleTemplateNameInput$oldApprovalRuleTemplateName": "The current name of the approval rule template.
", + "UpdateApprovalRuleTemplateNameInput$newApprovalRuleTemplateName": "The new name you want to apply to the approval rule template.
" + } + }, + "ApprovalRuleTemplateNameAlreadyExistsException": { + "base": "You cannot create an approval rule template with that name because a template with that name already exists in this AWS Region for your AWS account. Approval rule template names must be unique.
", + "refs": { + } + }, + "ApprovalRuleTemplateNameList": { + "base": null, + "refs": { + "ListApprovalRuleTemplatesOutput$approvalRuleTemplateNames": "The names of all the approval rule templates found in the AWS Region for your AWS account.
", + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput$approvalRuleTemplateNames": "The names of all approval rule templates associated with the repository.
" + } + }, + "ApprovalRuleTemplateNameRequiredException": { + "base": "An approval rule template name is required, but was not specified.
", + "refs": { + } + }, + "ApprovalRulesList": { + "base": null, + "refs": { + "PullRequest$approvalRules": "The approval rules applied to the pull request.
" + } + }, + "ApprovalRulesNotSatisfiedList": { + "base": null, + "refs": { + "Evaluation$approvalRulesNotSatisfied": "The names of the approval rules that have not had their conditions met.
" + } + }, + "ApprovalRulesSatisfiedList": { + "base": null, + "refs": { + "Evaluation$approvalRulesSatisfied": "The names of the approval rules that have had their conditions met.
" + } + }, + "ApprovalState": { + "base": null, + "refs": { + "Approval$approvalState": "The state of the approval, APPROVE or REVOKE. REVOKE states are not stored.
", + "ApprovalStateChangedEventMetadata$approvalStatus": "The approval status for the pull request.
", + "UpdatePullRequestApprovalStateInput$approvalState": "The approval state to associate with the user on the pull request.
" + } + }, + "ApprovalStateChangedEventMetadata": { + "base": "Returns information about a change in the approval state for a pull request.
", + "refs": { + "PullRequestEvent$approvalStateChangedEventMetadata": "Information about an approval state change for a pull request.
" + } + }, + "ApprovalStateRequiredException": { + "base": "An approval state is required, but was not specified.
", + "refs": { + } + }, + "Approved": { + "base": null, + "refs": { + "Evaluation$approved": "Whether the state of the pull request is approved.
" } }, "Arn": { "base": null, "refs": { + "Approval$userArn": "The Amazon Resource Name (ARN) of the user.
", + "ApprovalRule$lastModifiedUser": "The Amazon Resource Name (ARN) of the user who made the most recent changes to the approval rule.
", + "ApprovalRuleTemplate$lastModifiedUser": "The Amazon Resource Name (ARN) of the user who made the most recent changes to the approval rule template.
", "Comment$authorArn": "The Amazon Resource Name (ARN) of the person who posted the comment.
", - "DescribePullRequestEventsInput$actorArn": "The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.
", + "DescribePullRequestEventsInput$actorArn": "The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with more commits or changing the status of a pull request.
", + "GetPullRequestOverrideStateOutput$overrider": "The Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request.
", "ListPullRequestsInput$authorArn": "Optional. The Amazon Resource Name (ARN) of the user who created the pull request. If used, this filters the results to pull requests created by that user.
", "MergeMetadata$mergedBy": "The Amazon Resource Name (ARN) of the user who merged the branches.
", "PullRequest$authorArn": "The Amazon Resource Name (ARN) of the user who created the pull request.
", - "PullRequestEvent$actorArn": "The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.
", + "PullRequestEvent$actorArn": "The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with more commits or changing the status of a pull request.
", "RepositoryMetadata$Arn": "The Amazon Resource Name (ARN) of the repository.
", - "RepositoryTrigger$destinationArn": "The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon SNS.
" + "RepositoryTrigger$destinationArn": "The ARN of the resource that is the target for a trigger (for example, the ARN of a topic in Amazon SNS).
" + } + }, + "AssociateApprovalRuleTemplateWithRepositoryInput": { + "base": null, + "refs": { } }, "AuthorDoesNotExistException": { @@ -93,8 +333,30 @@ "refs": { } }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesError": { + "base": "Returns information about errors in a BatchAssociateApprovalRuleTemplateWithRepositories operation.
", + "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList$member": null + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList": { + "base": null, + "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput$errors": "A list of any errors that might have occurred while attempting to create the association between the template and the repositories.
" + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput": { + "base": null, + "refs": { + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput": { + "base": null, + "refs": { + } + }, "BatchDescribeMergeConflictsError": { - "base": "Information about errors in a BatchDescribeMergeConflicts operation.
", + "base": "Returns information about errors in a BatchDescribeMergeConflicts operation.
", "refs": { "BatchDescribeMergeConflictsErrors$member": null } @@ -115,6 +377,28 @@ "refs": { } }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError": { + "base": "Returns information about errors in a BatchDisassociateApprovalRuleTemplateFromRepositories operation.
", + "refs": { + "BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList$member": null + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList": { + "base": null, + "refs": { + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput$errors": "A list of any errors that might have occurred while attempting to remove the association between the template and the repositories.
" + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput": { + "base": null, + "refs": { + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput": { + "base": null, + "refs": { + } + }, "BatchGetCommitsError": { "base": "Returns information about errors in a BatchGetCommits operation.
", "refs": { @@ -124,7 +408,7 @@ "BatchGetCommitsErrorsList": { "base": null, "refs": { - "BatchGetCommitsOutput$errors": "Returns any commit IDs for which information could not be found. For example, if one of the commit IDs was a shortened SHA or that commit was not found in the specified repository, the ID will return an error object with additional information.
" + "BatchGetCommitsOutput$errors": "Returns any commit IDs for which information could not be found. For example, if one of the commit IDs was a shortened SHA ID or that commit was not found in the specified repository, the ID returns an error object with more information.
" } }, "BatchGetCommitsInput": { @@ -158,7 +442,7 @@ } }, "BlobIdRequiredException": { - "base": "A blob ID is required but was not specified.
", + "base": "A blob ID is required, but was not specified.
", "refs": { } }, @@ -187,14 +471,14 @@ "BranchInfo$branchName": "The name of the branch.
", "BranchNameList$member": null, "CreateBranchInput$branchName": "The name of the new branch to create.
", - "CreateCommitInput$branchName": "The name of the branch where you will create the commit.
", + "CreateCommitInput$branchName": "The name of the branch where you create the commit.
", "DeleteBranchInput$branchName": "The name of the branch to delete.
", - "DeleteFileInput$branchName": "The name of the branch where the commit will be made deleting the file.
", + "DeleteFileInput$branchName": "The name of the branch where the commit that deletes the file is made.
", "GetBranchInput$branchName": "The name of the branch for which you want to retrieve information.
", - "MergeBranchesByFastForwardInput$targetBranch": "The branch where the merge will be applied.
", - "MergeBranchesBySquashInput$targetBranch": "The branch where the merge will be applied.
", - "MergeBranchesByThreeWayInput$targetBranch": "The branch where the merge will be applied.
", - "PutFileInput$branchName": "The name of the branch where you want to add or update the file. If this is an empty repository, this branch will be created.
", + "MergeBranchesByFastForwardInput$targetBranch": "The branch where the merge is applied.
", + "MergeBranchesBySquashInput$targetBranch": "The branch where the merge is applied.
", + "MergeBranchesByThreeWayInput$targetBranch": "The branch where the merge is applied.
", + "PutFileInput$branchName": "The name of the branch where you want to add or update the file. If this is an empty repository, this branch is created.
", "RepositoryMetadata$defaultBranch": "The repository's default branch name.
", "UpdateDefaultBranchInput$defaultBranchName": "The name of the branch to set as the default.
" } @@ -205,7 +489,7 @@ } }, "BranchNameIsTagNameException": { - "base": "The specified branch name is not valid because it is a tag name. Type the name of a current branch in the repository. For a list of valid branch names, use ListBranches.
", + "base": "The specified branch name is not valid because it is a tag name. Enter the name of a branch in the repository. For a list of valid branch names, use ListBranches.
", "refs": { } }, @@ -213,11 +497,21 @@ "base": null, "refs": { "ListBranchesOutput$branches": "The list of branch names.
", - "RepositoryTrigger$branches": "The branches that will be included in the trigger configuration. If you specify an empty array, the trigger will apply to all branches.
Although no content is required in the array, you must include the array itself.
The branches to be included in the trigger configuration. If you specify an empty array, the trigger applies to all branches.
Although no content is required in the array, you must include the array itself.
A branch name is required but was not specified.
", + "base": "A branch name is required, but was not specified.
", + "refs": { + } + }, + "CannotDeleteApprovalRuleFromTemplateException": { + "base": "The approval rule cannot be deleted from the pull request because it was created by an approval rule template and applied to the pull request automatically.
", + "refs": { + } + }, + "CannotModifyApprovalRuleFromTemplateException": { + "base": "The approval rule cannot be modified for the pull request because it was created by an approval rule template and applied to the pull request automatically.
", "refs": { } }, @@ -233,23 +527,23 @@ "base": null, "refs": { "Difference$changeType": "Whether the change type of the difference is an addition (A), deletion (D), or modification (M).
", - "MergeOperations$source": "The operation on a file (add, modify, or delete) of a file in the source of a merge or pull request.
", + "MergeOperations$source": "The operation (add, modify, or delete) on a file in the source of a merge or pull request.
", "MergeOperations$destination": "The operation on a file in the destination of a merge or pull request.
" } }, "ClientRequestToken": { "base": null, "refs": { - "Comment$clientRequestToken": "A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
", - "CreatePullRequestInput$clientRequestToken": "A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you do not have to generate an idempotency token, as this will be done for you.
A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
", - "PostCommentForPullRequestInput$clientRequestToken": "A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
", - "PostCommentReplyInput$clientRequestToken": "A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
", - "PullRequest$clientRequestToken": "A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
" + "Comment$clientRequestToken": "A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
", + "CreatePullRequestInput$clientRequestToken": "A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, an idempotency token is created for you.
A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
", + "PostCommentForPullRequestInput$clientRequestToken": "A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
", + "PostCommentReplyInput$clientRequestToken": "A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
", + "PullRequest$clientRequestToken": "A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
" } }, "ClientRequestTokenRequiredException": { - "base": "A client request token is required. A client request token is an unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.
", + "base": "A client request token is required. A client request token is an unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.
", "refs": { } }, @@ -293,7 +587,7 @@ } }, "CommentDoesNotExistException": { - "base": "No comment exists with the provided ID. Verify that you have provided the correct ID, and then try again.
", + "base": "No comment exists with the provided ID. Verify that you have used the correct ID, and then try again.
", "refs": { } }, @@ -365,26 +659,26 @@ "base": null, "refs": { "BranchInfo$commitId": "The ID of the last commit made to the branch.
", - "CommentsForComparedCommit$beforeCommitId": "The full commit ID of the commit used to establish the 'before' of the comparison.
", - "CommentsForComparedCommit$afterCommitId": "The full commit ID of the commit used to establish the 'after' of the comparison.
", - "CommentsForPullRequest$beforeCommitId": "The full commit ID of the commit that was the tip of the destination branch when the pull request was created. This commit will be superceded by the after commit in the source branch when and if you merge the source branch into the destination branch.
", - "CommentsForPullRequest$afterCommitId": "he full commit ID of the commit that was the tip of the source branch at the time the comment was made.
", + "CommentsForComparedCommit$beforeCommitId": "The full commit ID of the commit used to establish the before of the comparison.
", + "CommentsForComparedCommit$afterCommitId": "The full commit ID of the commit used to establish the after of the comparison.
", + "CommentsForPullRequest$beforeCommitId": "The full commit ID of the commit that was the tip of the destination branch when the pull request was created. This commit is superceded by the after commit in the source branch when and if you merge the source branch into the destination branch.
", + "CommentsForPullRequest$afterCommitId": "The full commit ID of the commit that was the tip of the source branch at the time the comment was made.
", "CreateBranchInput$commitId": "The ID of the commit to point the new branch to.
", - "CreateCommitInput$parentCommitId": "The ID of the commit that is the parent of the commit you will create. If this is an empty repository, this is not required.
", - "DeleteFileInput$parentCommitId": "The ID of the commit that is the tip of the branch where you want to create the commit that will delete the file. This must be the HEAD commit for the branch. The commit that deletes the file will be created from this commit ID.
", - "GetCommentsForComparedCommitInput$beforeCommitId": "To establish the directionality of the comparison, the full commit ID of the 'before' commit.
", - "GetCommentsForComparedCommitInput$afterCommitId": "To establish the directionality of the comparison, the full commit ID of the 'after' commit.
", + "CreateCommitInput$parentCommitId": "The ID of the commit that is the parent of the commit you create. Not required if this is an empty repository.
", + "DeleteFileInput$parentCommitId": "The ID of the commit that is the tip of the branch where you want to create the commit that deletes the file. This must be the HEAD commit for the branch. The commit that deletes the file is created from this commit ID.
", + "GetCommentsForComparedCommitInput$beforeCommitId": "To establish the directionality of the comparison, the full commit ID of the before commit.
", + "GetCommentsForComparedCommitInput$afterCommitId": "To establish the directionality of the comparison, the full commit ID of the after commit.
", "GetCommentsForPullRequestInput$beforeCommitId": "The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.
", "GetCommentsForPullRequestInput$afterCommitId": "The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made.
", "MergeMetadata$mergeCommitId": "The commit ID for the merge commit, if any.
", - "PostCommentForComparedCommitInput$beforeCommitId": "To establish the directionality of the comparison, the full commit ID of the 'before' commit.
This is required for commenting on any commit unless that commit is the initial commit.
To establish the directionality of the comparison, the full commit ID of the 'after' commit.
", - "PostCommentForComparedCommitOutput$beforeCommitId": "In the directionality you established, the full commit ID of the 'before' commit.
", - "PostCommentForComparedCommitOutput$afterCommitId": "In the directionality you established, the full commit ID of the 'after' commit.
", + "PostCommentForComparedCommitInput$beforeCommitId": "To establish the directionality of the comparison, the full commit ID of the before commit. Required for commenting on any commit unless that commit is the initial commit.
", + "PostCommentForComparedCommitInput$afterCommitId": "To establish the directionality of the comparison, the full commit ID of the after commit.
", + "PostCommentForComparedCommitOutput$beforeCommitId": "In the directionality you established, the full commit ID of the before commit.
", + "PostCommentForComparedCommitOutput$afterCommitId": "In the directionality you established, the full commit ID of the after commit.
", "PostCommentForPullRequestInput$beforeCommitId": "The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.
", "PostCommentForPullRequestInput$afterCommitId": "The full commit ID of the commit in the source branch that is the current tip of the branch for the pull request when you post the comment.
", "PostCommentForPullRequestOutput$beforeCommitId": "The full commit ID of the commit in the source branch used to create the pull request, or in the case of an updated pull request, the full commit ID of the commit used to update the pull request.
", - "PostCommentForPullRequestOutput$afterCommitId": "The full commit ID of the commit in the destination branch where the pull request will be merged.
", + "PostCommentForPullRequestOutput$afterCommitId": "The full commit ID of the commit in the destination branch where the pull request is merged.
", "PullRequestCreatedEventMetadata$sourceCommitId": "The commit ID on the source branch used when the pull request was created.
", "PullRequestCreatedEventMetadata$destinationCommitId": "The commit ID of the tip of the branch specified as the destination branch when the pull request was created.
", "PullRequestCreatedEventMetadata$mergeBase": "The commit ID of the most recent commit that the source branch and the destination branch have in common.
", @@ -392,9 +686,9 @@ "PullRequestSourceReferenceUpdatedEventMetadata$afterCommitId": "The full commit ID of the commit in the source branch that was the tip of the branch at the time the pull request was updated.
", "PullRequestSourceReferenceUpdatedEventMetadata$mergeBase": "The commit ID of the most recent commit that the source branch and the destination branch have in common.
", "PullRequestTarget$destinationCommit": "The full commit ID that is the tip of the destination branch. This is the commit where the pull request was or will be merged.
", - "PullRequestTarget$sourceCommit": "The full commit ID of the tip of the source branch used to create the pull request. If the pull request branch is updated by a push while the pull request is open, the commit ID will change to reflect the new tip of the branch.
", + "PullRequestTarget$sourceCommit": "The full commit ID of the tip of the source branch used to create the pull request. If the pull request branch is updated by a push while the pull request is open, the commit ID changes to reflect the new tip of the branch.
", "PullRequestTarget$mergeBase": "The commit ID of the most recent commit that the source branch and the destination branch have in common.
", - "PutFileInput$parentCommitId": "The full commit ID of the head commit in the branch where you want to add or update the file. If this is an empty repository, no commit ID is required. If this is not an empty repository, a commit ID is required.
The commit ID must match the ID of the head commit at the time of the operation, or an error will occur, and the file will not be added or updated.
" + "PutFileInput$parentCommitId": "The full commit ID of the head commit in the branch where you want to add or update the file. If this is an empty repository, no commit ID is required. If this is not an empty repository, a commit ID is required.
The commit ID must match the ID of the head commit at the time of the operation. Otherwise, an error occurs, and the file is not added or updated.
" } }, "CommitIdDoesNotExistException": { @@ -410,7 +704,7 @@ "CommitIdsInputList": { "base": null, "refs": { - "BatchGetCommitsInput$commitIds": "The full commit IDs of the commits to get information about.
You must supply the full SHAs of each commit. You cannot use shortened SHAs.
The full commit IDs of the commits to get information about.
You must supply the full SHA IDs of each commit. You cannot use shortened SHA IDs.
A list of commit IDs is required, but was either not specified or the list was empty.
", "refs": { } }, @@ -431,28 +725,28 @@ "CommitName": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "BatchDescribeMergeConflictsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "CreateUnreferencedMergeCommitInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "CreateUnreferencedMergeCommitInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "DescribeMergeConflictsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "DescribeMergeConflictsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "GetDifferencesInput$beforeCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, the full commit ID. Optional. If not specified, all changes prior to the afterCommitSpecifier
value will be shown. If you do not use beforeCommitSpecifier
in your request, consider limiting the results with maxResults
.
The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "BatchDescribeMergeConflictsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "CreateUnreferencedMergeCommitInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "CreateUnreferencedMergeCommitInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "DescribeMergeConflictsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "DescribeMergeConflictsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "GetDifferencesInput$beforeCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, the full commit ID). Optional. If not specified, all changes before the afterCommitSpecifier
value are shown. If you do not use beforeCommitSpecifier
in your request, consider limiting the results with maxResults
.
The branch, tag, HEAD, or other fully qualified reference used to identify a commit.
", - "GetFileInput$commitSpecifier": "The fully-quaified reference that identifies the commit that contains the file. For example, you could specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, then the head commit will be used.
", - "GetFolderInput$commitSpecifier": "A fully-qualified reference used to identify a commit that contains the version of the folder's content to return. A fully-qualified reference can be a commit ID, branch name, tag, or reference such as HEAD. If no specifier is provided, the folder content will be returned as it exists in the HEAD commit.
", - "GetMergeCommitInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "GetMergeCommitInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "GetMergeConflictsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "GetMergeConflictsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "GetMergeOptionsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "GetMergeOptionsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "MergeBranchesByFastForwardInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "MergeBranchesByFastForwardInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "MergeBranchesBySquashInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "MergeBranchesBySquashInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "MergeBranchesByThreeWayInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
", - "MergeBranchesByThreeWayInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.
" + "GetFileInput$commitSpecifier": "The fully quaified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, the head commit is used.
", + "GetFolderInput$commitSpecifier": "A fully qualified reference used to identify a commit that contains the version of the folder's content to return. A fully qualified reference can be a commit ID, branch name, tag, or reference such as HEAD. If no specifier is provided, the folder content is returned as it exists in the HEAD commit.
", + "GetMergeCommitInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "GetMergeCommitInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "GetMergeConflictsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "GetMergeConflictsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "GetMergeOptionsInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "GetMergeOptionsInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "MergeBranchesByFastForwardInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "MergeBranchesByFastForwardInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "MergeBranchesBySquashInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "MergeBranchesBySquashInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "MergeBranchesByThreeWayInput$sourceCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
", + "MergeBranchesByThreeWayInput$destinationCommitSpecifier": "The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).
" } }, "CommitObjectsList": { @@ -480,16 +774,16 @@ "ConflictDetailLevelTypeEnum": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "CreateUnreferencedMergeCommitInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "DescribeMergeConflictsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "GetMergeCommitInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "GetMergeConflictsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "GetMergeOptionsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "MergeBranchesBySquashInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "MergeBranchesByThreeWayInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "MergePullRequestBySquashInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
", - "MergePullRequestByThreeWayInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.
" + "BatchDescribeMergeConflictsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "CreateUnreferencedMergeCommitInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "DescribeMergeConflictsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "GetMergeCommitInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "GetMergeConflictsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "GetMergeOptionsInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "MergeBranchesBySquashInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "MergeBranchesByThreeWayInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "MergePullRequestBySquashInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
", + "MergePullRequestByThreeWayInput$conflictDetailLevel": "The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.
" } }, "ConflictMetadata": { @@ -503,32 +797,32 @@ "ConflictMetadataList": { "base": null, "refs": { - "GetMergeConflictsOutput$conflictMetadataList": "A list of metadata for any conflicting files. If the specified merge strategy is FAST_FORWARD_MERGE, this list will always be empty.
" + "GetMergeConflictsOutput$conflictMetadataList": "A list of metadata for any conflicting files. If the specified merge strategy is FAST_FORWARD_MERGE, this list is always empty.
" } }, "ConflictResolution": { - "base": "A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
", + "base": "If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.
", "refs": { - "CreateUnreferencedMergeCommitInput$conflictResolution": "A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
", - "MergeBranchesBySquashInput$conflictResolution": "A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
", - "MergeBranchesByThreeWayInput$conflictResolution": "A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
", - "MergePullRequestBySquashInput$conflictResolution": "A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
", - "MergePullRequestByThreeWayInput$conflictResolution": "A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.
" + "CreateUnreferencedMergeCommitInput$conflictResolution": "If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.
", + "MergeBranchesBySquashInput$conflictResolution": "If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.
", + "MergeBranchesByThreeWayInput$conflictResolution": "If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.
", + "MergePullRequestBySquashInput$conflictResolution": "If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.
", + "MergePullRequestByThreeWayInput$conflictResolution": "If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.
" } }, "ConflictResolutionStrategyTypeEnum": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "CreateUnreferencedMergeCommitInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "DescribeMergeConflictsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "GetMergeCommitInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "GetMergeConflictsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "GetMergeOptionsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "MergeBranchesBySquashInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "MergeBranchesByThreeWayInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "MergePullRequestBySquashInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
", - "MergePullRequestByThreeWayInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.
" + "BatchDescribeMergeConflictsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "CreateUnreferencedMergeCommitInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "DescribeMergeConflictsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "GetMergeCommitInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "GetMergeConflictsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "GetMergeOptionsInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "MergeBranchesBySquashInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "MergeBranchesByThreeWayInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "MergePullRequestBySquashInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
", + "MergePullRequestByThreeWayInput$conflictResolutionStrategy": "Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.
" } }, "Conflicts": { @@ -544,7 +838,17 @@ "PostCommentForComparedCommitInput$content": "The content of the comment you want to make.
", "PostCommentForPullRequestInput$content": "The content of your comment on the change.
", "PostCommentReplyInput$content": "The contents of your reply to a comment.
", - "UpdateCommentInput$content": "The updated content with which you want to replace the existing content of the comment.
" + "UpdateCommentInput$content": "The updated content to replace the existing content of the comment.
" + } + }, + "CreateApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "CreateApprovalRuleTemplateOutput": { + "base": null, + "refs": { } }, "CreateBranchInput": { @@ -562,6 +866,16 @@ "refs": { } }, + "CreatePullRequestApprovalRuleInput": { + "base": null, + "refs": { + } + }, + "CreatePullRequestApprovalRuleOutput": { + "base": null, + "refs": { + } + }, "CreatePullRequestInput": { "base": null, "refs": { @@ -595,6 +909,8 @@ "CreationDate": { "base": null, "refs": { + "ApprovalRule$creationDate": "The date the approval rule was created, in timestamp format.
", + "ApprovalRuleTemplate$creationDate": "The date the approval rule template was created, in timestamp format.
", "Comment$creationDate": "The date and time the comment was created, in timestamp format.
", "PullRequest$creationDate": "The date and time the pull request was originally created, in timestamp format.
", "RepositoryMetadata$creationDate": "The date and time the repository was created, in timestamp format.
" @@ -611,6 +927,16 @@ "refs": { } }, + "DeleteApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "DeleteApprovalRuleTemplateOutput": { + "base": null, + "refs": { + } + }, "DeleteBranchInput": { "base": "Represents the input of a delete branch operation.
", "refs": { @@ -634,12 +960,12 @@ "DeleteFileEntries": { "base": null, "refs": { - "ConflictResolution$deleteFiles": "Files that will be deleted as part of the merge conflict resolution.
", - "CreateCommitInput$deleteFiles": "The files to delete in this commit. These files will still exist in prior commits.
" + "ConflictResolution$deleteFiles": "Files to be deleted as part of the merge conflict resolution.
", + "CreateCommitInput$deleteFiles": "The files to delete in this commit. These files still exist in earlier commits.
" } }, "DeleteFileEntry": { - "base": "A file that will be deleted as part of a commit.
", + "base": "A file that is deleted as part of a commit.
", "refs": { "DeleteFileEntries$member": null } @@ -654,6 +980,16 @@ "refs": { } }, + "DeletePullRequestApprovalRuleInput": { + "base": null, + "refs": { + } + }, + "DeletePullRequestApprovalRuleOutput": { + "base": null, + "refs": { + } + }, "DeleteRepositoryInput": { "base": "Represents the input of a delete repository operation.
", "refs": { @@ -689,7 +1025,7 @@ "refs": { "CreatePullRequestInput$description": "A description of the pull request.
", "PullRequest$description": "The user-defined description of the pull request. This description can be used to clarify what should be reviewed and other details of the request.
", - "UpdatePullRequestDescriptionInput$description": "The updated content of the description for the pull request. This content will replace the existing description.
" + "UpdatePullRequestDescriptionInput$description": "The updated content of the description for the pull request. This content replaces the existing description.
" } }, "Difference": { @@ -701,7 +1037,7 @@ "DifferenceList": { "base": null, "refs": { - "GetDifferencesOutput$differences": "A differences data type object that contains information about the differences, including whether the difference is added, modified, or deleted (A, D, M).
" + "GetDifferencesOutput$differences": "A data type object that contains information about the differences, including whether the difference is added, modified, or deleted (A, D, M).
" } }, "DirectoryNameConflictsWithFileNameException": { @@ -709,16 +1045,21 @@ "refs": { } }, + "DisassociateApprovalRuleTemplateFromRepositoryInput": { + "base": null, + "refs": { + } + }, "Email": { "base": null, "refs": { "CreateCommitInput$email": "The email address of the person who created the commit.
", "CreateUnreferencedMergeCommitInput$email": "The email address for the person who created the unreferenced commit.
", - "DeleteFileInput$email": "The email address for the commit that deletes the file. If no email address is specified, the email address will be left blank.
", - "MergeBranchesBySquashInput$email": "The email address of the person merging the branches. This information will be used in the commit information for the merge.
", - "MergeBranchesByThreeWayInput$email": "The email address of the person merging the branches. This information will be used in the commit information for the merge.
", - "MergePullRequestBySquashInput$email": "The email address of the person merging the branches. This information will be used in the commit information for the merge.
", - "MergePullRequestByThreeWayInput$email": "The email address of the person merging the branches. This information will be used in the commit information for the merge.
", + "DeleteFileInput$email": "The email address for the commit that deletes the file. If no email address is specified, the email address is left blank.
", + "MergeBranchesBySquashInput$email": "The email address of the person merging the branches. This information is used in the commit information for the merge.
", + "MergeBranchesByThreeWayInput$email": "The email address of the person merging the branches. This information is used in the commit information for the merge.
", + "MergePullRequestBySquashInput$email": "The email address of the person merging the branches. This information is used in the commit information for the merge.
", + "MergePullRequestByThreeWayInput$email": "The email address of the person merging the branches. This information is used in the commit information for the merge.
", "PutFileInput$email": "An email address for the person adding or updating the file.
", "UserInfo$email": "The email address associated with the user who made the commit, if any.
" } @@ -751,15 +1092,35 @@ "ErrorCode": { "base": null, "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesError$errorCode": "An error code that specifies whether the repository name was not valid or not found.
", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError$errorCode": "An error code that specifies whether the repository name was not valid or not found.
", "BatchGetCommitsError$errorCode": "An error code that specifies whether the commit ID was not valid or not found.
" } }, "ErrorMessage": { "base": null, "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesError$errorMessage": "An error message that provides details about why the repository name was not found or not valid.
", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError$errorMessage": "An error message that provides details about why the repository name was either not found or not valid.
", "BatchGetCommitsError$errorMessage": "An error message that provides detail about why the commit ID either was not found or was not valid.
" } }, + "EvaluatePullRequestApprovalRulesInput": { + "base": null, + "refs": { + } + }, + "EvaluatePullRequestApprovalRulesOutput": { + "base": null, + "refs": { + } + }, + "Evaluation": { + "base": "Returns information about the approval rules applied to a pull request and whether conditions have been met.
", + "refs": { + "EvaluatePullRequestApprovalRulesOutput$evaluation": "The result of the evaluation, including the names of the rules whose conditions have been met (if any), the names of the rules whose conditions have not been met (if any), whether the pull request is in the approved state, and whether the pull request approval rule has been set aside by an override.
" + } + }, "EventDate": { "base": null, "refs": { @@ -788,7 +1149,7 @@ } }, "FileContentAndSourceFileSpecifiedException": { - "base": "The commit cannot be created because both a source file and file content have been specified for the same file. You cannot provide both. Either specify a source file, or provide the file content directly.
", + "base": "The commit cannot be created because both a source file and file content have been specified for the same file. You cannot provide both. Either specify a source file or provide the file content directly.
", "refs": { } }, @@ -798,12 +1159,12 @@ } }, "FileContentSizeLimitExceededException": { - "base": "The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.
", + "base": "The file cannot be added because it is too large. The maximum file size is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.
", "refs": { } }, "FileDoesNotExistException": { - "base": "The specified file does not exist. Verify that you have provided the correct name of the file, including its full path and extension.
", + "base": "The specified file does not exist. Verify that you have used the correct file name, full path, and extension.
", "refs": { } }, @@ -815,17 +1176,17 @@ "FileList": { "base": null, "refs": { - "GetFolderOutput$files": "The list of files that exist in the specified folder, if any.
" + "GetFolderOutput$files": "The list of files in the specified folder, if any.
" } }, "FileMetadata": { - "base": "A file that will be added, updated, or deleted as part of a commit.
", + "base": "A file to be added, updated, or deleted as part of a commit.
", "refs": { "FilesMetadata$member": null } }, "FileModeRequiredException": { - "base": "The commit cannot be created because a file mode is required to update mode permissions for an existing file, but no file mode has been specified.
", + "base": "The commit cannot be created because no file mode has been specified. A file mode is required to update mode permissions for a file.
", "refs": { } }, @@ -837,9 +1198,9 @@ "FileModes$source": "The file mode of a file in the source of a merge or pull request.
", "FileModes$destination": "The file mode of a file in the destination of a merge or pull request.
", "FileModes$base": "The file mode of a file in the base of a merge or pull request.
", - "GetFileOutput$fileMode": "The extrapolated file mode permissions of the blob. Valid values include strings such as EXECUTABLE and not numeric values.
The file mode permissions returned by this API are not the standard file mode permission values, such as 100644, but rather extrapolated values. See below for a full list of supported return values.
The extrapolated file mode permissions of the blob. Valid values include strings such as EXECUTABLE and not numeric values.
The file mode permissions returned by this API are not the standard file mode permission values, such as 100644, but rather extrapolated values. See the supported return values.
The extrapolated file mode permissions for the file. Valid values include EXECUTABLE and NORMAL.
", - "PutFileInput$fileMode": "The file mode permissions of the blob. Valid file mode permissions are listed below.
", + "PutFileInput$fileMode": "The file mode permissions of the blob. Valid file mode permissions are listed here.
", "ReplaceContentEntry$fileMode": "The file mode to apply during conflict resoltion.
", "SetFileModeEntry$fileMode": "The file mode for the file.
", "SymbolicLink$fileMode": "The file mode permissions of the blob that cotains information about the symbolic link.
" @@ -906,14 +1267,24 @@ } }, "FolderDoesNotExistException": { - "base": "The specified folder does not exist. Either the folder name is not correct, or you did not provide the full path to the folder.
", + "base": "The specified folder does not exist. Either the folder name is not correct, or you did not enter the full path to the folder.
", "refs": { } }, "FolderList": { "base": null, "refs": { - "GetFolderOutput$subFolders": "The list of folders that exist beneath the specified folder, if any.
" + "GetFolderOutput$subFolders": "The list of folders that exist under the specified folder, if any.
" + } + }, + "GetApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "GetApprovalRuleTemplateOutput": { + "base": null, + "refs": { } }, "GetBlobInput": { @@ -1036,6 +1407,16 @@ "refs": { } }, + "GetPullRequestApprovalStatesInput": { + "base": null, + "refs": { + } + }, + "GetPullRequestApprovalStatesOutput": { + "base": null, + "refs": { + } + }, "GetPullRequestInput": { "base": null, "refs": { @@ -1046,6 +1427,16 @@ "refs": { } }, + "GetPullRequestOverrideStateInput": { + "base": null, + "refs": { + } + }, + "GetPullRequestOverrideStateOutput": { + "base": null, + "refs": { + } + }, "GetRepositoryInput": { "base": "Represents the input of a get repository operation.
", "refs": { @@ -1069,11 +1460,11 @@ "HunkContent": { "base": null, "refs": { - "MergeHunkDetail$hunkContent": "The base-64 encoded content of the hunk merged region that might or might not contain a conflict.
" + "MergeHunkDetail$hunkContent": "The base-64 encoded content of the hunk merged region that might contain a conflict.
" } }, "IdempotencyParameterMismatchException": { - "base": "The client request token is not valid. Either the token is not in a valid format, or the token has been used in a previous request and cannot be re-used.
", + "base": "The client request token is not valid. Either the token is not in a valid format, or the token has been used in a previous request and cannot be reused.
", "refs": { } }, @@ -1082,6 +1473,36 @@ "refs": { } }, + "InvalidApprovalRuleContentException": { + "base": "The content for the approval rule is not valid.
", + "refs": { + } + }, + "InvalidApprovalRuleNameException": { + "base": "The name for the approval rule is not valid.
", + "refs": { + } + }, + "InvalidApprovalRuleTemplateContentException": { + "base": "The content of the approval rule template is not valid.
", + "refs": { + } + }, + "InvalidApprovalRuleTemplateDescriptionException": { + "base": "The description for the approval rule template is not valid because it exceeds the maximum characters allowed for a description. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.
", + "refs": { + } + }, + "InvalidApprovalRuleTemplateNameException": { + "base": "The name of the approval rule template is not valid. Template names must be between 1 and 100 valid characters in length. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.
", + "refs": { + } + }, + "InvalidApprovalStateException": { + "base": "The state for the approval is not valid. Valid values include APPROVE and REVOKE.
", + "refs": { + } + }, "InvalidAuthorArnException": { "base": "The Amazon Resource Name (ARN) is not valid. Make sure that you have provided the full ARN for the author of the pull request, and then try again.
", "refs": { @@ -1143,7 +1564,7 @@ } }, "InvalidDescriptionException": { - "base": "The pull request description is not valid. Descriptions are limited to 1,000 characters in length.
", + "base": "The pull request description is not valid. Descriptions cannot be more than 1,000 characters.
", "refs": { } }, @@ -1158,7 +1579,7 @@ } }, "InvalidFileLocationException": { - "base": "The location of the file is not valid. Make sure that you include the extension of the file as well as the file name.
", + "base": "The location of the file is not valid. Make sure that you include the file name and extension.
", "refs": { } }, @@ -1197,6 +1618,11 @@ "refs": { } }, + "InvalidOverrideStatusException": { + "base": "The override status is not valid. Valid statuses are OVERRIDE and REVOKE.
", + "refs": { + } + }, "InvalidParentCommitIdException": { "base": "The parent commit ID is not valid. The commit ID cannot be empty, and must match the head commit ID for the branch of the repository where you want to add or update a file.
", "refs": { @@ -1228,7 +1654,7 @@ } }, "InvalidReferenceNameException": { - "base": "The specified reference name format is not valid. Reference names must conform to the Git references format, for example refs/heads/master. For more information, see Git Internals - Git References or consult your Git documentation.
", + "base": "The specified reference name format is not valid. Reference names must conform to the Git references format (for example, refs/heads/master). For more information, see Git Internals - Git References or consult your Git documentation.
", "refs": { } }, @@ -1253,7 +1679,7 @@ } }, "InvalidRepositoryNameException": { - "base": "At least one specified repository name is not valid.
This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.
A specified repository name is not valid.
This exception occurs only when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.
The region for the trigger target does not match the region for the repository. Triggers must be created in the same region as the target for the trigger.
", + "base": "The AWS Region for the trigger target does not match the AWS Region for the repository. Triggers must be created in the same Region as the target for the trigger.
", "refs": { } }, @@ -1292,6 +1718,16 @@ "refs": { } }, + "InvalidRevisionIdException": { + "base": "The revision ID is not valid. Use GetPullRequest to determine the value.
", + "refs": { + } + }, + "InvalidRuleContentSha256Exception": { + "base": "The SHA-256 hash signature for the rule content is not valid.
", + "refs": { + } + }, "InvalidSortByException": { "base": "The specified sort by value is not valid.
", "refs": { @@ -1364,7 +1800,7 @@ "IsHunkConflict": { "base": null, "refs": { - "MergeHunk$isConflict": "A Boolean value indicating whether a combination of hunks contains a conflict. Conflicts occur when the same file or the same lines in a file were modified in both the source and destination of a merge or pull request. Valid values include true, false, and null. This will be true when the hunk represents a conflict and one or more files contains a line conflict. File mode conflicts in a merge will not set this to be true.
" + "MergeHunk$isConflict": "A Boolean value indicating whether a combination of hunks contains a conflict. Conflicts occur when the same file or the same lines in a file were modified in both the source and destination of a merge or pull request. Valid values include true, false, and null. True when the hunk represents a conflict and one or more files contains a line conflict. File mode conflicts in a merge do not set this to true.
" } }, "IsMergeable": { @@ -1394,18 +1830,20 @@ "KeepEmptyFolders": { "base": null, "refs": { - "CreateCommitInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.
", - "CreateUnreferencedMergeCommitInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.
", - "DeleteFileInput$keepEmptyFolders": "Specifies whether to delete the folder or directory that contains the file you want to delete if that file is the only object in the folder or directory. By default, empty folders will be deleted. This includes empty folders that are part of the directory structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file in dir4 will also delete the empty folders dir4, dir3, and dir2.
", - "MergeBranchesBySquashInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.
", - "MergeBranchesByThreeWayInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.
", - "MergePullRequestBySquashInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.
", - "MergePullRequestByThreeWayInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.
" + "CreateCommitInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a ..gitkeep file is created for empty folders. The default is false.
", + "CreateUnreferencedMergeCommitInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file is created for empty folders. The default is false.
", + "DeleteFileInput$keepEmptyFolders": "If a file is the only object in the folder or directory, specifies whether to delete the folder or directory that contains the file. By default, empty folders are deleted. This includes empty folders that are part of the directory structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file in dir4 also deletes the empty folders dir4, dir3, and dir2.
", + "MergeBranchesBySquashInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file is created for empty folders. The default is false.
", + "MergeBranchesByThreeWayInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.
", + "MergePullRequestBySquashInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.
", + "MergePullRequestByThreeWayInput$keepEmptyFolders": "If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.
" } }, "LastModifiedDate": { "base": null, "refs": { + "ApprovalRule$lastModifiedDate": "The date the approval rule was most recently changed, in timestamp format.
", + "ApprovalRuleTemplate$lastModifiedDate": "The date the approval rule template was most recently changed, in timestamp format.
", "Comment$lastModifiedDate": "The date and time the comment was most recently modified, in timestamp format.
", "PullRequest$lastActivityDate": "The day and time of the last user or system activity on the pull request, in timestamp format.
", "RepositoryMetadata$lastModifiedDate": "The date and time the repository was last modified, in timestamp format.
" @@ -1414,7 +1852,7 @@ "Limit": { "base": null, "refs": { - "GetDifferencesInput$MaxResults": "A non-negative integer used to limit the number of returned results.
" + "GetDifferencesInput$MaxResults": "A non-zero, non-negative integer used to limit the number of returned results.
" } }, "LineNumber": { @@ -1424,6 +1862,26 @@ "MergeHunkDetail$endLine": "The end position of the hunk in the merge result.
" } }, + "ListApprovalRuleTemplatesInput": { + "base": null, + "refs": { + } + }, + "ListApprovalRuleTemplatesOutput": { + "base": null, + "refs": { + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryInput": { + "base": null, + "refs": { + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput": { + "base": null, + "refs": { + } + }, "ListBranchesInput": { "base": "Represents the input of a list branches operation.
", "refs": { @@ -1444,6 +1902,16 @@ "refs": { } }, + "ListRepositoriesForApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "ListRepositoriesForApprovalRuleTemplateOutput": { + "base": null, + "refs": { + } + }, "ListRepositoriesInput": { "base": "Represents the input of a list repositories operation.
", "refs": { @@ -1467,11 +1935,11 @@ "Location": { "base": "Returns information about the location of a change or comment in the comparison between two commits or a pull request.
", "refs": { - "CommentsForComparedCommit$location": "Location information about the comment on the comparison, including the file name, line number, and whether the version of the file where the comment was made is 'BEFORE' or 'AFTER'.
", - "CommentsForPullRequest$location": "Location information about the comment on the pull request, including the file name, line number, and whether the version of the file where the comment was made is 'BEFORE' (destination branch) or 'AFTER' (source branch).
", + "CommentsForComparedCommit$location": "Location information about the comment on the comparison, including the file name, line number, and whether the version of the file where the comment was made is BEFORE or AFTER.
", + "CommentsForPullRequest$location": "Location information about the comment on the pull request, including the file name, line number, and whether the version of the file where the comment was made is BEFORE (destination branch) or AFTER (source branch).
", "PostCommentForComparedCommitInput$location": "The location of the comparison where you want to comment.
", "PostCommentForComparedCommitOutput$location": "The location of the comment in the comparison between the two commits.
", - "PostCommentForPullRequestInput$location": "The location of the change where you want to post your comment. If no location is provided, the comment will be posted as a general comment on the pull request difference between the before commit ID and the after commit ID.
", + "PostCommentForPullRequestInput$location": "The location of the change where you want to post your comment. If no location is provided, the comment is posted as a general comment on the pull request difference between the before commit ID and the after commit ID.
", "PostCommentForPullRequestOutput$location": "The location of the change where you posted your comment.
" } }, @@ -1486,11 +1954,14 @@ "BatchDescribeMergeConflictsInput$maxMergeHunks": "The maximum number of merge hunks to include in the output.
", "BatchDescribeMergeConflictsInput$maxConflictFiles": "The maximum number of files to include in the output.
", "DescribeMergeConflictsInput$maxMergeHunks": "The maximum number of merge hunks to include in the output.
", - "DescribePullRequestEventsInput$maxResults": "A non-negative integer used to limit the number of returned results. The default is 100 events, which is also the maximum number of events that can be returned in a result.
", - "GetCommentsForComparedCommitInput$maxResults": "A non-negative integer used to limit the number of returned results. The default is 100 comments, and is configurable up to 500.
", - "GetCommentsForPullRequestInput$maxResults": "A non-negative integer used to limit the number of returned results. The default is 100 comments. You can return up to 500 comments with a single request.
", + "DescribePullRequestEventsInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results. The default is 100 events, which is also the maximum number of events that can be returned in a result.
", + "GetCommentsForComparedCommitInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results. The default is 100 comments, but you can configure up to 500.
", + "GetCommentsForPullRequestInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results. The default is 100 comments. You can return up to 500 comments with a single request.
", "GetMergeConflictsInput$maxConflictFiles": "The maximum number of files to include in the output.
", - "ListPullRequestsInput$maxResults": "A non-negative integer used to limit the number of returned results.
" + "ListApprovalRuleTemplatesInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results.
", + "ListAssociatedApprovalRuleTemplatesForRepositoryInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results.
", + "ListPullRequestsInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results.
", + "ListRepositoriesForApprovalRuleTemplateInput$maxResults": "A non-zero, non-negative integer used to limit the number of returned results.
" } }, "MaximumBranchesExceededException": { @@ -1514,7 +1985,12 @@ } }, "MaximumItemsToCompareExceededException": { - "base": "The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.
", + "base": "The number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.
", + "refs": { + } + }, + "MaximumNumberOfApprovalsExceededException": { + "base": "The number of approvals required for the approval rule exceeds the maximum number allowed.
", "refs": { } }, @@ -1524,7 +2000,7 @@ } }, "MaximumRepositoryNamesExceededException": { - "base": "The maximum number of allowed repository names was exceeded. Currently, this number is 25.
", + "base": "The maximum number of allowed repository names was exceeded. Currently, this number is 100.
", "refs": { } }, @@ -1533,6 +2009,11 @@ "refs": { } }, + "MaximumRuleTemplatesAssociatedWithRepositoryException": { + "base": "The maximum number of approval rule templates for a repository has been exceeded. You cannot associate more than 25 approval rule templates with a repository.
", + "refs": { + } + }, "MergeBranchesByFastForwardInput": { "base": null, "refs": { @@ -1654,14 +2135,14 @@ "refs": { "BatchDescribeMergeConflictsError$message": "The message provided by the exception.
", "Commit$message": "The commit message associated with the specified commit.
", - "CreateCommitInput$commitMessage": "The commit message you want to include as part of creating the commit. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.
", + "CreateCommitInput$commitMessage": "The commit message you want to include in the commit. Commit messages are limited to 256 KB. If no message is specified, a default message is used.
", "CreateUnreferencedMergeCommitInput$commitMessage": "The commit message for the unreferenced commit.
", - "DeleteFileInput$commitMessage": "The commit message you want to include as part of deleting the file. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.
", + "DeleteFileInput$commitMessage": "The commit message you want to include as part of deleting the file. Commit messages are limited to 256 KB. If no message is specified, a default message is used.
", "MergeBranchesBySquashInput$commitMessage": "The commit message for the merge.
", "MergeBranchesByThreeWayInput$commitMessage": "The commit message to include in the commit information for the merge.
", "MergePullRequestBySquashInput$commitMessage": "The commit message to include in the commit information for the merge.
", "MergePullRequestByThreeWayInput$commitMessage": "The commit message to include in the commit information for the merge.
", - "PutFileInput$commitMessage": "A message about why this file was added or updated. While optional, adding a message is strongly encouraged in order to provide a more useful commit history for your repository.
" + "PutFileInput$commitMessage": "A message about why this file was added or updated. Although it is optional, a message makes the commit history for your repository more useful.
" } }, "Mode": { @@ -1683,14 +2164,14 @@ "Name": { "base": null, "refs": { - "CreateCommitInput$authorName": "The name of the author who created the commit. This information will be used as both the author and committer for the commit.
", - "CreateUnreferencedMergeCommitInput$authorName": "The name of the author who created the unreferenced commit. This information will be used as both the author and committer for the commit.
", - "DeleteFileInput$name": "The name of the author of the commit that deletes the file. If no name is specified, the user's ARN will be used as the author name and committer name.
", - "MergeBranchesBySquashInput$authorName": "The name of the author who created the commit. This information will be used as both the author and committer for the commit.
", - "MergeBranchesByThreeWayInput$authorName": "The name of the author who created the commit. This information will be used as both the author and committer for the commit.
", - "MergePullRequestBySquashInput$authorName": "The name of the author who created the commit. This information will be used as both the author and committer for the commit.
", - "MergePullRequestByThreeWayInput$authorName": "The name of the author who created the commit. This information will be used as both the author and committer for the commit.
", - "PutFileInput$name": "The name of the person adding or updating the file. While optional, adding a name is strongly encouraged in order to provide a more useful commit history for your repository.
", + "CreateCommitInput$authorName": "The name of the author who created the commit. This information is used as both the author and committer for the commit.
", + "CreateUnreferencedMergeCommitInput$authorName": "The name of the author who created the unreferenced commit. This information is used as both the author and committer for the commit.
", + "DeleteFileInput$name": "The name of the author of the commit that deletes the file. If no name is specified, the user's ARN is used as the author name and committer name.
", + "MergeBranchesBySquashInput$authorName": "The name of the author who created the commit. This information is used as both the author and committer for the commit.
", + "MergeBranchesByThreeWayInput$authorName": "The name of the author who created the commit. This information is used as both the author and committer for the commit.
", + "MergePullRequestBySquashInput$authorName": "The name of the author who created the commit. This information is used as both the author and committer for the commit.
", + "MergePullRequestByThreeWayInput$authorName": "The name of the author who created the commit. This information is used as both the author and committer for the commit.
", + "PutFileInput$name": "The name of the person adding or updating the file. Although it is optional, a name makes the commit history for your repository more useful.
", "UserInfo$name": "The name of the user who made the specified commit.
" } }, @@ -1702,27 +2183,33 @@ "NextToken": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "BatchDescribeMergeConflictsInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "BatchDescribeMergeConflictsOutput$nextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", - "DescribeMergeConflictsInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "DescribeMergeConflictsInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "DescribeMergeConflictsOutput$nextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", - "DescribePullRequestEventsInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "DescribePullRequestEventsInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "DescribePullRequestEventsOutput$nextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", "GetCommentsForComparedCommitInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", "GetCommentsForComparedCommitOutput$nextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", - "GetCommentsForPullRequestInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "GetCommentsForPullRequestInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "GetCommentsForPullRequestOutput$nextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", - "GetDifferencesInput$NextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "GetDifferencesInput$NextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "GetDifferencesOutput$NextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", - "GetMergeConflictsInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "GetMergeConflictsInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "GetMergeConflictsOutput$nextToken": "An enumeration token that can be used in a request to return the next batch of the results.
", + "ListApprovalRuleTemplatesInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", + "ListApprovalRuleTemplatesOutput$nextToken": "An enumeration token that allows the operation to batch the next results of the operation.
", + "ListAssociatedApprovalRuleTemplatesForRepositoryInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput$nextToken": "An enumeration token that allows the operation to batch the next results of the operation.
", "ListBranchesInput$nextToken": "An enumeration token that allows the operation to batch the results.
", "ListBranchesOutput$nextToken": "An enumeration token that returns the batch of the results.
", - "ListPullRequestsInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", - "ListPullRequestsOutput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "ListPullRequestsInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", + "ListPullRequestsOutput$nextToken": "An enumeration token that allows the operation to batch the next results of the operation.
", + "ListRepositoriesForApprovalRuleTemplateInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", + "ListRepositoriesForApprovalRuleTemplateOutput$nextToken": "An enumeration token that allows the operation to batch the next results of the operation.
", "ListRepositoriesInput$nextToken": "An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.
", "ListRepositoriesOutput$nextToken": "An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.
", - "ListTagsForResourceInput$nextToken": "An enumeration token that when provided in a request, returns the next batch of the results.
", + "ListTagsForResourceInput$nextToken": "An enumeration token that, when provided in a request, returns the next batch of the results.
", "ListTagsForResourceOutput$nextToken": "An enumeration token that allows the operation to batch the next results of the operation.
" } }, @@ -1737,6 +2224,16 @@ "ConflictMetadata$numberOfConflicts": "The number of conflicts, including both hunk conflicts and metadata conflicts.
" } }, + "NumberOfRuleTemplatesExceededException": { + "base": "The maximum number of approval rule templates has been exceeded for this AWS Region.
", + "refs": { + } + }, + "NumberOfRulesExceededException": { + "base": "The approval rule cannot be added. The pull request has the maximum number of approval rules associated with it.
", + "refs": { + } + }, "ObjectId": { "base": null, "refs": { @@ -1745,11 +2242,11 @@ "BatchDescribeMergeConflictsOutput$baseCommitId": "The commit ID of the merge base.
", "BatchGetCommitsError$commitId": "A commit ID that either could not be found or was not in a valid format.
", "BlobMetadata$blobId": "The full ID of the blob.
", - "CommentsForComparedCommit$beforeBlobId": "The full blob ID of the commit used to establish the 'before' of the comparison.
", - "CommentsForComparedCommit$afterBlobId": "The full blob ID of the commit used to establish the 'after' of the comparison.
", + "CommentsForComparedCommit$beforeBlobId": "The full blob ID of the commit used to establish the before of the comparison.
", + "CommentsForComparedCommit$afterBlobId": "The full blob ID of the commit used to establish the after of the comparison.
", "CommentsForPullRequest$beforeBlobId": "The full blob ID of the file on which you want to comment on the destination commit.
", "CommentsForPullRequest$afterBlobId": "The full blob ID of the file on which you want to comment on the source commit.
", - "Commit$commitId": "The full SHA of the specified commit.
", + "Commit$commitId": "The full SHA ID of the specified commit.
", "Commit$treeId": "Tree information for the specified commit.
", "CommitIdsInputList$member": null, "CreateCommitOutput$commitId": "The full commit ID of the commit that contains your committed file changes.
", @@ -1766,15 +2263,15 @@ "FileMetadata$blobId": "The blob ID that contains the file information.
", "Folder$treeId": "The full SHA-1 pointer of the tree information for the commit that contains the folder.
", "GetBlobInput$blobId": "The ID of the blob, which is its SHA-1 pointer.
", - "GetCommitInput$commitId": "The commit ID. Commit IDs are the full SHA of the commit.
", + "GetCommitInput$commitId": "The commit ID. Commit IDs are the full SHA ID of the commit.
", "GetFileOutput$commitId": "The full commit ID of the commit that contains the content returned by GetFile.
", "GetFileOutput$blobId": "The blob ID of the object that represents the file content.
", - "GetFolderOutput$commitId": "The full commit ID used as a reference for which version of the folder content is returned.
", + "GetFolderOutput$commitId": "The full commit ID used as a reference for the returned version of the folder content.
", "GetFolderOutput$treeId": "The full SHA-1 pointer of the tree information for the commit that contains the folder.
", "GetMergeCommitOutput$sourceCommitId": "The commit ID of the source commit specifier that was used in the merge evaluation.
", "GetMergeCommitOutput$destinationCommitId": "The commit ID of the destination commit specifier that was used in the merge evaluation.
", "GetMergeCommitOutput$baseCommitId": "The commit ID of the merge base.
", - "GetMergeCommitOutput$mergedCommitId": "The commit ID for the merge commit created when the source branch was merged into the destination branch. If the fast-forward merge strategy was used, no merge commit exists.
", + "GetMergeCommitOutput$mergedCommitId": "The commit ID for the merge commit created when the source branch was merged into the destination branch. If the fast-forward merge strategy was used, there is no merge commit.
", "GetMergeConflictsOutput$destinationCommitId": "The commit ID of the destination commit specifier that was used in the merge evaluation.
", "GetMergeConflictsOutput$sourceCommitId": "The commit ID of the source commit specifier that was used in the merge evaluation.
", "GetMergeConflictsOutput$baseCommitId": "The commit ID of the merge base.
", @@ -1791,11 +2288,11 @@ "MergePullRequestBySquashInput$sourceCommitId": "The full commit ID of the original or updated commit in the pull request source branch. Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.
", "MergePullRequestByThreeWayInput$sourceCommitId": "The full commit ID of the original or updated commit in the pull request source branch. Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.
", "ParentList$member": null, - "PostCommentForComparedCommitOutput$beforeBlobId": "In the directionality you established, the blob ID of the 'before' blob.
", - "PostCommentForComparedCommitOutput$afterBlobId": "In the directionality you established, the blob ID of the 'after' blob.
", - "PostCommentForPullRequestOutput$beforeBlobId": "In the directionality of the pull request, the blob ID of the 'before' blob.
", - "PostCommentForPullRequestOutput$afterBlobId": "In the directionality of the pull request, the blob ID of the 'after' blob.
", - "PutFileOutput$commitId": "The full SHA of the commit that contains this file change.
", + "PostCommentForComparedCommitOutput$beforeBlobId": "In the directionality you established, the blob ID of the before blob.
", + "PostCommentForComparedCommitOutput$afterBlobId": "In the directionality you established, the blob ID of the after blob.
", + "PostCommentForPullRequestOutput$beforeBlobId": "In the directionality of the pull request, the blob ID of the before blob.
", + "PostCommentForPullRequestOutput$afterBlobId": "In the directionality of the pull request, the blob ID of the after blob.
", + "PutFileOutput$commitId": "The full SHA ID of the commit that contains this file change.
", "PutFileOutput$blobId": "The ID of the blob, which is its SHA-1 pointer.
", "PutFileOutput$treeId": "The full SHA-1 pointer of the tree information for the commit that contains this file change.
", "SubModule$commitId": "The commit ID that contains the reference to the submodule.
", @@ -1828,6 +2325,41 @@ "ListRepositoriesInput$order": "The order in which to sort the results of a list repositories operation.
" } }, + "OriginApprovalRuleTemplate": { + "base": "Returns information about the template that created the approval rule for a pull request.
", + "refs": { + "ApprovalRule$originApprovalRuleTemplate": "The approval rule template used to create the rule.
" + } + }, + "Overridden": { + "base": null, + "refs": { + "Evaluation$overridden": "Whether the approval rule requirements for the pull request have been overridden and no longer need to be met.
", + "GetPullRequestOverrideStateOutput$overridden": "A Boolean value that indicates whether a pull request has had its rules set aside (TRUE) or whether all approval rules still apply (FALSE).
" + } + }, + "OverrideAlreadySetException": { + "base": "The pull request has already had its approval rules set to override.
", + "refs": { + } + }, + "OverridePullRequestApprovalRulesInput": { + "base": null, + "refs": { + } + }, + "OverrideStatus": { + "base": null, + "refs": { + "ApprovalRuleOverriddenEventMetadata$overrideStatus": "The status of the override event.
", + "OverridePullRequestApprovalRulesInput$overrideStatus": "Whether you want to set aside approval rule requirements for the pull request (OVERRIDE) or revoke a previous override and apply approval rule requirements (REVOKE). REVOKE status is not stored.
" + } + }, + "OverrideStatusRequiredException": { + "base": "An override status is required, but no value was provided. Valid values include OVERRIDE and REVOKE.
", + "refs": { + } + }, "ParentCommitDoesNotExistException": { "base": "The parent commit ID is not valid because it does not exist. The specified parent commit ID does not exist in the specified branch of the repository.
", "refs": { @@ -1853,33 +2385,33 @@ "base": null, "refs": { "BatchDescribeMergeConflictsError$filePath": "The path to the file.
", - "BlobMetadata$path": "The path to the blob and any associated file name, if any.
", + "BlobMetadata$path": "The path to the blob and associated file name, if any.
", "ConflictMetadata$filePath": "The path of the file that contains conflicts.
", - "DeleteFileEntry$filePath": "The full path of the file that will be deleted, including the name of the file.
", - "DeleteFileInput$filePath": "The fully-qualified path to the file that will be deleted, including the full name and extension of that file. For example, /examples/file.md is a fully qualified path to a file named file.md in a folder named examples.
", - "DeleteFileOutput$filePath": "The fully-qualified path to the file that will be deleted, including the full name and extension of that file.
", + "DeleteFileEntry$filePath": "The full path of the file to be deleted, including the name of the file.
", + "DeleteFileInput$filePath": "The fully qualified path to the file that to be deleted, including the full name and extension of that file. For example, /examples/file.md is a fully qualified path to a file named file.md in a folder named examples.
", + "DeleteFileOutput$filePath": "The fully qualified path to the file to be deleted, including the full name and extension of that file.
", "DescribeMergeConflictsInput$filePath": "The path of the target files used to describe the conflicts.
", - "File$absolutePath": "The fully-qualified path to the file in the repository.
", + "File$absolutePath": "The fully qualified path to the file in the repository.
", "File$relativePath": "The relative path of the file from the folder where the query originated.
", - "FileMetadata$absolutePath": "The full path to the file that will be added or updated, including the name of the file.
", + "FileMetadata$absolutePath": "The full path to the file to be added or updated, including the name of the file.
", "FilePaths$member": null, - "Folder$absolutePath": "The fully-qualified path of the folder in the repository.
", + "Folder$absolutePath": "The fully qualified path of the folder in the repository.
", "Folder$relativePath": "The relative path of the specified folder from the folder where the query originated.
", - "GetDifferencesInput$beforePath": "The file path in which to check for differences. Limits the results to this path. Can also be used to specify the previous name of a directory or folder. If beforePath
and afterPath
are not specified, differences will be shown for all paths.
The file path in which to check differences. Limits the results to this path. Can also be used to specify the changed name of a directory or folder, if it has changed. If not specified, differences will be shown for all paths.
", - "GetFileInput$filePath": "The fully-qualified path to the file, including the full name and extension of the file. For example, /examples/file.md is the fully-qualified path to a file named file.md in a folder named examples.
", - "GetFileOutput$filePath": "The fully qualified path to the specified file. This returns the name and extension of the file.
", - "GetFolderInput$folderPath": "The fully-qualified path to the folder whose contents will be returned, including the folder name. For example, /examples is a fully-qualified path to a folder named examples that was created off of the root directory (/) of a repository.
", - "GetFolderOutput$folderPath": "The fully-qualified path of the folder whose contents are returned.
", + "GetDifferencesInput$beforePath": "The file path in which to check for differences. Limits the results to this path. Can also be used to specify the previous name of a directory or folder. If beforePath
and afterPath
are not specified, differences are shown for all paths.
The file path in which to check differences. Limits the results to this path. Can also be used to specify the changed name of a directory or folder, if it has changed. If not specified, differences are shown for all paths.
", + "GetFileInput$filePath": "The fully qualified path to the file, including the full name and extension of the file. For example, /examples/file.md is the fully qualified path to a file named file.md in a folder named examples.
", + "GetFileOutput$filePath": "The fully qualified path to the specified file. Returns the name and extension of the file.
", + "GetFolderInput$folderPath": "The fully qualified path to the folder whose contents are returned, including the folder name. For example, /examples is a fully-qualified path to a folder named examples that was created off of the root directory (/) of a repository.
", + "GetFolderOutput$folderPath": "The fully qualified path of the folder whose contents are returned.
", "Location$filePath": "The name of the file being compared, including its extension and subdirectory, if any.
", "PutFileEntry$filePath": "The full path to the file in the repository, including the name of the file.
", - "PutFileInput$filePath": "The name of the file you want to add or update, including the relative path to the file in the repository.
If the path does not currently exist in the repository, the path will be created as part of adding the file.
The name of the file you want to add or update, including the relative path to the file in the repository.
If the path does not currently exist in the repository, the path is created as part of adding the file.
The path of the conflicting file.
", "SetFileModeEntry$filePath": "The full path to the file, including the name of the file.
", "SourceFileSpecifier$filePath": "The full path to the file, including the name of the file.
", "SubModule$absolutePath": "The fully qualified path to the folder that contains the reference to the submodule.
", "SubModule$relativePath": "The relative path of the submodule from the folder where the query originated.
", - "SymbolicLink$absolutePath": "The fully-qualified path to the folder that contains the symbolic link.
", + "SymbolicLink$absolutePath": "The fully qualified path to the folder that contains the symbolic link.
", "SymbolicLink$relativePath": "The relative path of the symbolic link from the folder where the query originated.
" } }, @@ -1896,7 +2428,7 @@ "Position": { "base": null, "refs": { - "Location$filePosition": "The position of a change within a compared file, in line number format.
" + "Location$filePosition": "The position of a change in a compared file, in line number format.
" } }, "PostCommentForComparedCommitInput": { @@ -1934,7 +2466,7 @@ "refs": { "CreatePullRequestOutput$pullRequest": "Information about the newly created pull request.
", "GetPullRequestOutput$pullRequest": "Information about the specified pull request.
", - "MergePullRequestByFastForwardOutput$pullRequest": "Information about the specified pull request, including information about the merge.
", + "MergePullRequestByFastForwardOutput$pullRequest": "Information about the specified pull request, including the merge.
", "MergePullRequestBySquashOutput$pullRequest": null, "MergePullRequestByThreeWayOutput$pullRequest": null, "UpdatePullRequestDescriptionOutput$pullRequest": "Information about the updated pull request.
", @@ -1947,6 +2479,16 @@ "refs": { } }, + "PullRequestApprovalRulesNotSatisfiedException": { + "base": "The pull request cannot be merged because one or more approval rules applied to the pull request have conditions that have not been met.
", + "refs": { + } + }, + "PullRequestCannotBeApprovedByAuthorException": { + "base": "The approval cannot be applied because the user approving the pull request matches the user who created the pull request. You cannot approve a pull request that you created.
", + "refs": { + } + }, "PullRequestCreatedEventMetadata": { "base": "Metadata about the pull request that is used when comparing the pull request source with its destination.
", "refs": { @@ -1974,24 +2516,32 @@ "base": null, "refs": { "DescribePullRequestEventsInput$pullRequestEventType": "Optional. The pull request event type about which you want to return information.
", - "PullRequestEvent$pullRequestEventType": "The type of the pull request event, for example a status change event (PULL_REQUEST_STATUS_CHANGED) or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED).
" + "PullRequestEvent$pullRequestEventType": "The type of the pull request event (for example, a status change event (PULL_REQUEST_STATUS_CHANGED) or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED)).
" } }, "PullRequestId": { "base": null, "refs": { "CommentsForPullRequest$pullRequestId": "The system-generated ID of the pull request.
", + "CreatePullRequestApprovalRuleInput$pullRequestId": "The system-generated ID of the pull request for which you want to create the approval rule.
", + "DeletePullRequestApprovalRuleInput$pullRequestId": "The system-generated ID of the pull request that contains the approval rule you want to delete.
", "DescribePullRequestEventsInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", + "EvaluatePullRequestApprovalRulesInput$pullRequestId": "The system-generated ID of the pull request you want to evaluate.
", "GetCommentsForPullRequestInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", + "GetPullRequestApprovalStatesInput$pullRequestId": "The system-generated ID for the pull request.
", "GetPullRequestInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", + "GetPullRequestOverrideStateInput$pullRequestId": "The ID of the pull request for which you want to get information about whether approval rules have been set aside (overridden).
", "MergePullRequestByFastForwardInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", "MergePullRequestBySquashInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", "MergePullRequestByThreeWayInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", + "OverridePullRequestApprovalRulesInput$pullRequestId": "The system-generated ID of the pull request for which you want to override all approval rule requirements. To get this information, use GetPullRequest.
", "PostCommentForPullRequestInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", "PostCommentForPullRequestOutput$pullRequestId": "The system-generated ID of the pull request.
", "PullRequest$pullRequestId": "The system-generated ID of the pull request.
", "PullRequestEvent$pullRequestId": "The system-generated ID of the pull request.
", "PullRequestIdList$member": null, + "UpdatePullRequestApprovalRuleContentInput$pullRequestId": "The system-generated ID of the pull request.
", + "UpdatePullRequestApprovalStateInput$pullRequestId": "The system-generated ID of the pull request.
", "UpdatePullRequestDescriptionInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", "UpdatePullRequestStatusInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
", "UpdatePullRequestTitleInput$pullRequestId": "The system-generated ID of the pull request. To get this ID, use ListPullRequests.
" @@ -2032,7 +2582,7 @@ "ListPullRequestsInput$pullRequestStatus": "Optional. The status of the pull request. If used, this refines the results to the pull requests that match the specified status.
", "PullRequest$pullRequestStatus": "The status of the pull request. Pull request status can only change from OPEN
to CLOSED
.
The changed status of the pull request.
", - "UpdatePullRequestStatusInput$pullRequestStatus": "The status of the pull request. The only valid operations are to update the status from OPEN
to OPEN
, OPEN
to CLOSED
or from from CLOSED
to CLOSED
.
The status of the pull request. The only valid operations are to update the status from OPEN
to OPEN
, OPEN
to CLOSED
or from CLOSED
to CLOSED
.
Information about a file that will be added or updated as part of a commit.
", + "base": "Information about a file added or updated as part of a commit.
", "refs": { "PutFileEntries$member": null } @@ -2080,7 +2630,7 @@ } }, "PutRepositoryTriggersInput": { - "base": "Represents the input ofa put repository triggers operation.
", + "base": "Represents the input of a put repository triggers operation.
", "refs": { } }, @@ -2097,11 +2647,11 @@ "ReferenceName": { "base": null, "refs": { - "PullRequestMergedStateChangedEventMetadata$destinationReference": "The name of the branch that the pull request will be merged into.
", + "PullRequestMergedStateChangedEventMetadata$destinationReference": "The name of the branch that the pull request is merged into.
", "PullRequestTarget$sourceReference": "The branch of the repository that contains the changes for the pull request. Also known as the source branch.
", - "PullRequestTarget$destinationReference": "The branch of the repository where the pull request changes will be merged into. Also known as the destination branch.
", + "PullRequestTarget$destinationReference": "The branch of the repository where the pull request changes are merged. Also known as the destination branch.
", "Target$sourceReference": "The branch of the repository that contains the changes for the pull request. Also known as the source branch.
", - "Target$destinationReference": "The branch of the repository where the pull request changes will be merged into. Also known as the destination branch.
" + "Target$destinationReference": "The branch of the repository where the pull request changes are merged. Also known as the destination branch.
" } }, "ReferenceNameRequiredException": { @@ -2117,13 +2667,13 @@ "RelativeFileVersionEnum": { "base": null, "refs": { - "Location$relativeFileVersion": "In a comparison of commits or a pull request, whether the change is in the 'before' or 'after' of that comparison.
" + "Location$relativeFileVersion": "In a comparison of commits or a pull request, whether the change is in the before or after of that comparison.
" } }, "ReplaceContentEntries": { "base": null, "refs": { - "ConflictResolution$replaceContents": "Files that will have content replaced as part of the merge conflict resolution.
" + "ConflictResolution$replaceContents": "Files to have content replaced as part of the merge conflict resolution.
" } }, "ReplaceContentEntry": { @@ -2133,7 +2683,7 @@ } }, "ReplacementContentRequiredException": { - "base": "USE_NEW_CONTENT was specified but no replacement content has been provided.
", + "base": "USE_NEW_CONTENT was specified, but no replacement content has been provided.
", "refs": { } }, @@ -2151,7 +2701,7 @@ "RepositoryDescription": { "base": null, "refs": { - "CreateRepositoryInput$repositoryDescription": "A comment or description about the new repository.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.
A comment or description about the new repository.
The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.
A comment or description about the repository.
", "UpdateRepositoryDescriptionInput$repositoryDescription": "The new comment or description for the specified repository. Repository descriptions are limited to 1,000 characters.
" } @@ -2191,18 +2741,22 @@ "RepositoryName": { "base": null, "refs": { + "AssociateApprovalRuleTemplateWithRepositoryInput$repositoryName": "The name of the repository that you want to associate with the template.
", + "BatchAssociateApprovalRuleTemplateWithRepositoriesError$repositoryName": "The name of the repository where the association was not made.
", "BatchDescribeMergeConflictsInput$repositoryName": "The name of the repository that contains the merge conflicts you want to review.
", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError$repositoryName": "The name of the repository where the association with the template was not able to be removed.
", "BatchGetCommitsInput$repositoryName": "The name of the repository that contains the commits.
", "CommentsForComparedCommit$repositoryName": "The name of the repository that contains the compared commits.
", "CommentsForPullRequest$repositoryName": "The name of the repository that contains the pull request.
", "CreateBranchInput$repositoryName": "The name of the repository in which you want to create the new branch.
", - "CreateCommitInput$repositoryName": "The name of the repository where you will create the commit.
", - "CreateRepositoryInput$repositoryName": "The name of the new repository to be created.
The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix \".git\" is prohibited.
The name of the repository where you create the commit.
", + "CreateRepositoryInput$repositoryName": "The name of the new repository to be created.
The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix .git is prohibited.
The name of the repository where you want to create the unreferenced merge commit.
", "DeleteBranchInput$repositoryName": "The name of the repository that contains the branch to be deleted.
", "DeleteFileInput$repositoryName": "The name of the repository that contains the file to delete.
", "DeleteRepositoryInput$repositoryName": "The name of the repository to delete.
", "DescribeMergeConflictsInput$repositoryName": "The name of the repository where you want to get information about a merge conflict.
", + "DisassociateApprovalRuleTemplateFromRepositoryInput$repositoryName": "The name of the repository you want to disassociate from the template.
", "GetBlobInput$repositoryName": "The name of the repository that contains the blob.
", "GetBranchInput$repositoryName": "The name of the repository that contains the branch for which you want to retrieve information.
", "GetCommentsForComparedCommitInput$repositoryName": "The name of the repository where you want to compare commits.
", @@ -2216,6 +2770,7 @@ "GetMergeOptionsInput$repositoryName": "The name of the repository that contains the commits about which you want to get merge options.
", "GetRepositoryInput$repositoryName": "The name of the repository to get information about.
", "GetRepositoryTriggersInput$repositoryName": "The name of the repository for which the trigger is configured.
", + "ListAssociatedApprovalRuleTemplatesForRepositoryInput$repositoryName": "The name of the repository for which you want to list all associated approval rule templates.
", "ListBranchesInput$repositoryName": "The name of the repository that contains the branches.
", "ListPullRequestsInput$repositoryName": "The name of the repository for which you want to list pull requests.
", "MergeBranchesByFastForwardInput$repositoryName": "The name of the repository where you want to merge two branches.
", @@ -2242,7 +2797,7 @@ "TestRepositoryTriggersInput$repositoryName": "The name of the repository in which to test the triggers.
", "UpdateDefaultBranchInput$repositoryName": "The name of the repository to set or change the default branch for.
", "UpdateRepositoryDescriptionInput$repositoryName": "The name of the repository to set or change the comment or description for.
", - "UpdateRepositoryNameInput$oldName": "The existing name of the repository.
", + "UpdateRepositoryNameInput$oldName": "The current name of the repository.
", "UpdateRepositoryNameInput$newName": "The new name for the repository.
" } }, @@ -2266,16 +2821,21 @@ "RepositoryNameList": { "base": null, "refs": { - "BatchGetRepositoriesInput$repositoryNames": "The names of the repositories to get information about.
" + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput$repositoryNames": "The names of the repositories you want to associate with the template.
The length constraint limit is for each string in the array. The array itself can be empty.
A list of names of the repositories that have been associated with the template.
", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput$repositoryNames": "The repository names that you want to disassociate from the approval rule template.
The length constraint limit is for each string in the array. The array itself can be empty.
A list of repository names that have had their association with the template removed.
", + "BatchGetRepositoriesInput$repositoryNames": "The names of the repositories to get information about.
The length constraint limit is for each string in the array. The array itself can be empty.
A list of repository names that are associated with the specified approval rule template.
" } }, "RepositoryNameRequiredException": { - "base": "A repository name is required but was not specified.
", + "base": "A repository name is required, but was not specified.
", "refs": { } }, "RepositoryNamesRequiredException": { - "base": "A repository names object is required but was not specified.
", + "base": "At least one repository name object is required, but was not specified.
", "refs": { } }, @@ -2297,18 +2857,18 @@ } }, "RepositoryTriggerBranchNameListRequiredException": { - "base": "At least one branch name is required but was not specified in the trigger configuration.
", + "base": "At least one branch name is required, but was not specified in the trigger configuration.
", "refs": { } }, "RepositoryTriggerCustomData": { "base": null, "refs": { - "RepositoryTrigger$customData": "Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.
" + "RepositoryTrigger$customData": "Any custom data associated with the trigger to be included in the information sent to the target of the trigger.
" } }, "RepositoryTriggerDestinationArnRequiredException": { - "base": "A destination ARN for the target service for the trigger is required but was not specified.
", + "base": "A destination ARN for the target service for the trigger is required, but was not specified.
", "refs": { } }, @@ -2321,11 +2881,11 @@ "RepositoryTriggerEventList": { "base": null, "refs": { - "RepositoryTrigger$events": "The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.
The valid value \"all\" cannot be used with any other values.
The repository events that cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.
The valid value \"all\" cannot be used with any other values.
At least one event for the trigger is required but was not specified.
", + "base": "At least one event for the trigger is required, but was not specified.
", "refs": { } }, @@ -2338,13 +2898,13 @@ "RepositoryTriggerExecutionFailureList": { "base": null, "refs": { - "TestRepositoryTriggersOutput$failedExecutions": "The list of triggers that were not able to be tested. This list provides the names of the triggers that could not be tested, separated by commas.
" + "TestRepositoryTriggersOutput$failedExecutions": "The list of triggers that were not tested. This list provides the names of the triggers that could not be tested, separated by commas.
" } }, "RepositoryTriggerExecutionFailureMessage": { "base": null, "refs": { - "RepositoryTriggerExecutionFailure$failureMessage": "Additional message information about the trigger that did not run.
" + "RepositoryTriggerExecutionFailure$failureMessage": "Message information about the trigger that did not run.
" } }, "RepositoryTriggerName": { @@ -2362,7 +2922,7 @@ } }, "RepositoryTriggerNameRequiredException": { - "base": "A name for the trigger is required but was not specified.
", + "base": "A name for the trigger is required, but was not specified.
", "refs": { } }, @@ -2382,7 +2942,7 @@ } }, "RepositoryTriggersListRequiredException": { - "base": "The list of triggers for the repository is required but was not specified.
", + "base": "The list of triggers for the repository is required, but was not specified.
", "refs": { } }, @@ -2404,6 +2964,38 @@ "refs": { } }, + "RevisionId": { + "base": null, + "refs": { + "ApprovalRuleOverriddenEventMetadata$revisionId": "The revision ID of the pull request when the override event occurred.
", + "ApprovalStateChangedEventMetadata$revisionId": "The revision ID of the pull request when the approval state changed.
", + "EvaluatePullRequestApprovalRulesInput$revisionId": "The system-generated ID for the pull request revision. To retrieve the most recent revision ID for a pull request, use GetPullRequest.
", + "GetPullRequestApprovalStatesInput$revisionId": "The system-generated ID for the pull request revision.
", + "GetPullRequestOverrideStateInput$revisionId": "The system-generated ID of the revision for the pull request. To retrieve the most recent revision ID, use GetPullRequest.
", + "OverridePullRequestApprovalRulesInput$revisionId": "The system-generated ID of the most recent revision of the pull request. You cannot override approval rules for anything but the most recent revision of a pull request. To get the revision ID, use GetPullRequest.
", + "PullRequest$revisionId": "The system-generated revision ID for the pull request.
", + "UpdatePullRequestApprovalStateInput$revisionId": "The system-generated ID of the revision.
" + } + }, + "RevisionIdRequiredException": { + "base": "A revision ID is required, but was not provided.
", + "refs": { + } + }, + "RevisionNotCurrentException": { + "base": "The revision ID provided in the request does not match the current revision ID. Use GetPullRequest to retrieve the current revision ID.
", + "refs": { + } + }, + "RuleContentSha256": { + "base": null, + "refs": { + "ApprovalRule$ruleContentSha256": "The SHA-256 hash signature for the content of the approval rule.
", + "ApprovalRuleTemplate$ruleContentSha256": "The SHA-256 hash signature for the content of the approval rule template.
", + "UpdateApprovalRuleTemplateContentInput$existingRuleContentSha256": "The SHA-256 hash signature for the content of the approval rule. You can retrieve this information by using GetPullRequest.
", + "UpdatePullRequestApprovalRuleContentInput$existingRuleContentSha256": "The SHA-256 hash signature for the content of the approval rule. You can retrieve this information by using GetPullRequest.
" + } + }, "SameFileContentException": { "base": "The file was not added or updated because the content of the file is exactly the same as the content of that file in the repository and branch that you specified.
", "refs": { @@ -2417,7 +3009,7 @@ "SetFileModeEntries": { "base": null, "refs": { - "ConflictResolution$setFileModes": "File modes that will be set as part of the merge conflict resolution.
", + "ConflictResolution$setFileModes": "File modes that are set as part of the merge conflict resolution.
", "CreateCommitInput$setFileModes": "The file modes to update for files in this commit.
" } }, @@ -2434,7 +3026,7 @@ } }, "SourceAndDestinationAreSameException": { - "base": "The source branch and the destination branch for the pull request are the same. You must specify different branches for the source and destination.
", + "base": "The source branch and destination branch for the pull request are the same. You must specify different branches for the source and destination.
", "refs": { } }, @@ -2458,7 +3050,7 @@ "SubModuleList": { "base": null, "refs": { - "GetFolderOutput$subModules": "The list of submodules that exist in the specified folder, if any.
" + "GetFolderOutput$subModules": "The list of submodules in the specified folder, if any.
" } }, "SymbolicLink": { @@ -2470,7 +3062,7 @@ "SymbolicLinkList": { "base": null, "refs": { - "GetFolderOutput$symbolicLinks": "The list of symbolic links to other files and folders that exist in the specified folder, if any.
" + "GetFolderOutput$symbolicLinks": "The list of symbolic links to other files and folders in the specified folder, if any.
" } }, "TagKey": { @@ -2529,7 +3121,7 @@ "TargetList": { "base": null, "refs": { - "CreatePullRequestInput$targets": "The targets for the pull request, including the source of the code to be reviewed (the source branch), and the destination where the creator of the pull request intends the code to be merged after the pull request is closed (the destination branch).
" + "CreatePullRequestInput$targets": "The targets for the pull request, including the source of the code to be reviewed (the source branch) and the destination where the creator of the pull request intends the code to be merged after the pull request is closed (the destination branch).
" } }, "TargetRequiredException": { @@ -2565,9 +3157,9 @@ "Title": { "base": null, "refs": { - "CreatePullRequestInput$title": "The title of the pull request. This title will be used to identify the pull request to other users in the repository.
", - "PullRequest$title": "The user-defined title of the pull request. This title is displayed in the list of pull requests to other users of the repository.
", - "UpdatePullRequestTitleInput$title": "The updated title of the pull request. This will replace the existing title.
" + "CreatePullRequestInput$title": "The title of the pull request. This title is used to identify the pull request to other users in the repository.
", + "PullRequest$title": "The user-defined title of the pull request. This title is displayed in the list of pull requests to other repository users.
", + "UpdatePullRequestTitleInput$title": "The updated title of the pull request. This replaces the existing title.
" } }, "TitleRequiredException": { @@ -2585,6 +3177,36 @@ "refs": { } }, + "UpdateApprovalRuleTemplateContentInput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateContentOutput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateDescriptionInput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateDescriptionOutput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateNameInput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateNameOutput": { + "base": null, + "refs": { + } + }, "UpdateCommentInput": { "base": null, "refs": { @@ -2600,6 +3222,21 @@ "refs": { } }, + "UpdatePullRequestApprovalRuleContentInput": { + "base": null, + "refs": { + } + }, + "UpdatePullRequestApprovalRuleContentOutput": { + "base": null, + "refs": { + } + }, + "UpdatePullRequestApprovalStateInput": { + "base": null, + "refs": { + } + }, "UpdatePullRequestDescriptionInput": { "base": null, "refs": { diff --git a/models/apis/codecommit/2015-04-13/paginators-1.json b/models/apis/codecommit/2015-04-13/paginators-1.json index c917eaa80e8..5fcda36b895 100644 --- a/models/apis/codecommit/2015-04-13/paginators-1.json +++ b/models/apis/codecommit/2015-04-13/paginators-1.json @@ -30,6 +30,16 @@ "limit_key": "maxConflictFiles", "output_token": "nextToken" }, + "ListApprovalRuleTemplates": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, + "ListAssociatedApprovalRuleTemplatesForRepository": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, "ListBranches": { "input_token": "nextToken", "output_token": "nextToken", @@ -44,6 +54,11 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "repositories" + }, + "ListRepositoriesForApprovalRuleTemplate": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" } } } \ No newline at end of file diff --git 
a/models/apis/cognito-idp/2016-04-18/api-2.json b/models/apis/cognito-idp/2016-04-18/api-2.json index 3e1fab4ec33..e9c26736531 100644 --- a/models/apis/cognito-idp/2016-04-18/api-2.json +++ b/models/apis/cognito-idp/2016-04-18/api-2.json @@ -2571,7 +2571,8 @@ "REFRESH_TOKEN", "CUSTOM_AUTH", "ADMIN_NO_SRP_AUTH", - "USER_PASSWORD_AUTH" + "USER_PASSWORD_AUTH", + "ADMIN_USER_PASSWORD_AUTH" ] }, "AuthParametersType":{ @@ -2960,7 +2961,8 @@ "AllowedOAuthFlows":{"shape":"OAuthFlowsType"}, "AllowedOAuthScopes":{"shape":"ScopeListType"}, "AllowedOAuthFlowsUserPoolClient":{"shape":"BooleanType"}, - "AnalyticsConfiguration":{"shape":"AnalyticsConfigurationType"} + "AnalyticsConfiguration":{"shape":"AnalyticsConfigurationType"}, + "PreventUserExistenceErrors":{"shape":"PreventUserExistenceErrorTypes"} } }, "CreateUserPoolClientResponse":{ @@ -3357,7 +3359,9 @@ "members":{ "SourceArn":{"shape":"ArnType"}, "ReplyToEmailAddress":{"shape":"EmailAddressType"}, - "EmailSendingAccount":{"shape":"EmailSendingAccountType"} + "EmailSendingAccount":{"shape":"EmailSendingAccountType"}, + "From":{"shape":"StringType"}, + "ConfigurationSet":{"shape":"SESConfigurationSet"} } }, "EmailNotificationBodyType":{ @@ -3488,7 +3492,12 @@ "enum":[ "ADMIN_NO_SRP_AUTH", "CUSTOM_AUTH_FLOW_ONLY", - "USER_PASSWORD_AUTH" + "USER_PASSWORD_AUTH", + "ALLOW_ADMIN_USER_PASSWORD_AUTH", + "ALLOW_CUSTOM_AUTH", + "ALLOW_USER_PASSWORD_AUTH", + "ALLOW_USER_SRP_AUTH", + "ALLOW_REFRESH_TOKEN_AUTH" ] }, "FeedbackValueType":{ @@ -4218,6 +4227,13 @@ }, "exception":true }, + "PreventUserExistenceErrorTypes":{ + "type":"string", + "enum":[ + "LEGACY", + "ENABLED" + ] + }, "ProviderDescription":{ "type":"structure", "members":{ @@ -4424,6 +4440,12 @@ "min":3, "pattern":"^[0-9A-Za-z\\.\\-_]*(?Removes the specified tags from an Amazon Cognito user pool. 
You can use this action up to 5 times per second, per account", "UpdateAuthEventFeedback": "Provides the feedback for an authentication event whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
", "UpdateDeviceStatus": "Updates the device status.
", - "UpdateGroup": "Updates the specified group with the specified attributes.
Calling this action requires developer credentials.
", + "UpdateGroup": "Updates the specified group with the specified attributes.
Calling this action requires developer credentials.
If you don't provide a value for an attribute, it will be set to the default value.
Updates identity provider information for a user pool.
", - "UpdateResourceServer": "Updates the name and scopes of resource server. All other fields are read-only.
", + "UpdateResourceServer": "Updates the name and scopes of resource server. All other fields are read-only.
If you don't provide a value for an attribute, it will be set to the default value.
Allows a user to update a specific attribute (one at a time).
", - "UpdateUserPool": "Updates the specified user pool with the specified attributes. If you don't provide a value for an attribute, it will be set to the default value. You can get a list of the current user pool settings with .
", - "UpdateUserPoolClient": "Updates the specified user pool app client with the specified attributes. If you don't provide a value for an attribute, it will be set to the default value. You can get a list of the current user pool app client settings with .
", + "UpdateUserPool": "Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings with .
If you don't provide a value for an attribute, it will be set to the default value.
Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings with .
If you don't provide a value for an attribute, it will be set to the default value.
Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.
You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You cannot use it to change the domain for a user pool.
A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with AWS Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.
Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.
However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.
When you add your new certificate in ACM, you must choose US East (N. Virginia) as the AWS Region.
After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.
For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.
", "VerifySoftwareToken": "Use this API to register a user's entered TOTP code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.
", "VerifyUserAttribute": "Verifies the specified user attributes in the user pool.
" @@ -573,8 +573,8 @@ "AuthFlowType": { "base": null, "refs": { - "AdminInitiateAuthRequest$AuthFlow": "The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
USER_PASSWORD_AUTH
will take in USERNAME
and PASSWORD
and return the next challenge or tokens.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
ADMIN_NO_SRP_AUTH
: Non-SRP authentication flow; you can pass in the USERNAME and PASSWORD directly if the flow is enabled for calling the app client.
USER_PASSWORD_AUTH
: Non-SRP authentication flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is set, this flow will invoke the user migration Lambda if the USERNAME is not found in the user pool.
The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
USER_PASSWORD_AUTH
will take in USERNAME
and PASSWORD
and return the next challenge or tokens.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
USER_PASSWORD_AUTH
: Non-SRP authentication flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is set, this flow will invoke the user migration Lambda if the USERNAME is not found in the user pool.
ADMIN_NO_SRP_AUTH
is not a valid value.
The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
USER_PASSWORD_AUTH
will take in USERNAME
and PASSWORD
and return the next challenge or tokens.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
ADMIN_NO_SRP_AUTH
: Non-SRP authentication flow; you can pass in the USERNAME and PASSWORD directly if the flow is enabled for calling the app client.
USER_PASSWORD_AUTH
: Non-SRP authentication flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is set, this flow will invoke the user migration Lambda if the USERNAME is not found in the user pool.
ADMIN_USER_PASSWORD_AUTH
: Admin-based user password authentication. This replaces the ADMIN_NO_SRP_AUTH
authentication flow. In this flow, Cognito receives the password in the request instead of using the SRP process to verify passwords.
The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
USER_PASSWORD_AUTH
will take in USERNAME
and PASSWORD
and return the next challenge or tokens.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
USER_PASSWORD_AUTH
: Non-SRP authentication flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is set, this flow will invoke the user migration Lambda if the USERNAME is not found in the user pool.
ADMIN_USER_PASSWORD_AUTH
: Admin-based user password authentication. This replaces the ADMIN_NO_SRP_AUTH
authentication flow. In this flow, Cognito receives the password in the request instead of using the SRP process to verify passwords.
ADMIN_NO_SRP_AUTH
is not a valid value.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, post authentication, user migration, pre token generation, define auth challenge, create auth challenge, and verify auth challenge response. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the functions that are assigned to the post confirmation and pre mutation triggers. When Amazon Cognito invokes either of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the functions that are assigned to the custom message and pre mutation triggers. When Amazon Cognito invokes either of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata
attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata
value to enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
Take the following limitations into consideration when you use the ClientMetadata parameter:
Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.
Amazon Cognito does not validate the ClientMetadata value.
Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.
The explicit authentication flows.
", - "UpdateUserPoolClientRequest$ExplicitAuthFlows": "Explicit authentication flows.
", - "UserPoolClientType$ExplicitAuthFlows": "The explicit authentication flows.
" + "CreateUserPoolClientRequest$ExplicitAuthFlows": "The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_
prefix are deprecated in favor of new names with the ALLOW_
prefix. Note that values with ALLOW_
prefix cannot be used along with values without ALLOW_
prefix.
Valid values include:
ALLOW_ADMIN_USER_PASSWORD_AUTH
: Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH
. This setting replaces the ADMIN_NO_SRP_AUTH
setting. With this authentication flow, Cognito receives the password in the request instead of using the SRP (Secure Remote Password) protocol to verify passwords.
ALLOW_CUSTOM_AUTH
: Enable Lambda trigger based authentication.
ALLOW_USER_PASSWORD_AUTH
: Enable user password-based authentication. In this flow, Cognito receives the password in the request instead of using the SRP protocol to verify passwords.
ALLOW_USER_SRP_AUTH
: Enable SRP based authentication.
ALLOW_REFRESH_TOKEN_AUTH
: Enable authflow to refresh tokens.
The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_
prefix are deprecated in favor of new names with the ALLOW_
prefix. Note that values with ALLOW_
prefix cannot be used along with values without ALLOW_
prefix.
Valid values include:
ALLOW_ADMIN_USER_PASSWORD_AUTH
: Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH
. This setting replaces the ADMIN_NO_SRP_AUTH
setting. With this authentication flow, Cognito receives the password in the request instead of using the SRP (Secure Remote Password) protocol to verify passwords.
ALLOW_CUSTOM_AUTH
: Enable Lambda trigger based authentication.
ALLOW_USER_PASSWORD_AUTH
: Enable user password-based authentication. In this flow, Cognito receives the password in the request instead of using the SRP protocol to verify passwords.
ALLOW_USER_SRP_AUTH
: Enable SRP based authentication.
ALLOW_REFRESH_TOKEN_AUTH
: Enable authflow to refresh tokens.
The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_
prefix are deprecated in favor of new names with the ALLOW_
prefix. Note that values with ALLOW_
prefix cannot be used along with values without ALLOW_
prefix.
Valid values include:
ALLOW_ADMIN_USER_PASSWORD_AUTH
: Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH
. This setting replaces the ADMIN_NO_SRP_AUTH
setting. With this authentication flow, Cognito receives the password in the request instead of using the SRP (Secure Remote Password) protocol to verify passwords.
ALLOW_CUSTOM_AUTH
: Enable Lambda trigger based authentication.
ALLOW_USER_PASSWORD_AUTH
: Enable user password-based authentication. In this flow, Cognito receives the password in the request instead of using the SRP protocol to verify passwords.
ALLOW_USER_SRP_AUTH
: Enable SRP based authentication.
ALLOW_REFRESH_TOKEN_AUTH
: Enable authflow to refresh tokens.
Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED
and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY
, those APIs will return a UserNotFoundException
exception if the user does not exist in the user pool.
Valid values include:
ENABLED
- This prevents user existence-related errors.
LEGACY
- This represents the old behavior of Cognito where user existence related errors are not prevented.
This setting affects the behavior of following APIs:
After January 1st 2020, the value of PreventUserExistenceErrors
will default to ENABLED
for newly created user pool clients if no value is provided.
Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED
and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY
, those APIs will return a UserNotFoundException
exception if the user does not exist in the user pool.
Valid values include:
ENABLED
- This prevents user existence-related errors.
LEGACY
- This represents the old behavior of Cognito where user existence related errors are not prevented.
This setting affects the behavior of following APIs:
After January 1st 2020, the value of PreventUserExistenceErrors
will default to ENABLED
for newly created user pool clients if no value is provided.
Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED
and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY
, those APIs will return a UserNotFoundException
exception if the user does not exist in the user pool.
Valid values include:
ENABLED
- This prevents user existence-related errors.
LEGACY
- This represents the old behavior of Cognito where user existence related errors are not prevented.
This setting affects the behavior of following APIs:
After January 1st 2020, the value of PreventUserExistenceErrors
will default to ENABLED
for newly created user pool clients if no value is provided.
A container for identity provider details.
", "refs": { @@ -2231,6 +2239,12 @@ "DomainDescriptionType$S3Bucket": "The S3 bucket where the static files for this domain are stored.
" } }, + "SESConfigurationSet": { + "base": null, + "refs": { + "EmailConfigurationType$ConfigurationSet": "The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:
Event publishing – Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.
IP pool management – When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.
The type used for enabling SMS MFA at the user level.
", "refs": { @@ -2503,6 +2517,7 @@ "DeviceSecretVerifierConfigType$PasswordVerifier": "The password verifier.
", "DeviceSecretVerifierConfigType$Salt": "The salt.
", "DomainDescriptionType$CloudFrontDistribution": "The ARN of the CloudFront distribution.
", + "EmailConfigurationType$From": "Identifies either the sender’s email address or the sender’s name with their email address. For example, testuser@example.com
or Test User <testuser@example.com>
. This address will appear before the body of the email.
The user's IP address.
", "EventContextDataType$DeviceName": "The user's device name.
", "EventContextDataType$Timezone": "The user's time zone.
", diff --git a/models/apis/config/2014-11-12/api-2.json b/models/apis/config/2014-11-12/api-2.json index c70b178d44a..97d0dba03bd 100644 --- a/models/apis/config/2014-11-12/api-2.json +++ b/models/apis/config/2014-11-12/api-2.json @@ -84,6 +84,18 @@ {"shape":"NoSuchConfigurationRecorderException"} ] }, + "DeleteConformancePack":{ + "name":"DeleteConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConformancePackRequest"}, + "errors":[ + {"shape":"NoSuchConformancePackException"}, + {"shape":"ResourceInUseException"} + ] + }, "DeleteDeliveryChannel":{ "name":"DeleteDeliveryChannel", "http":{ @@ -122,6 +134,19 @@ {"shape":"OrganizationAccessDeniedException"} ] }, + "DeleteOrganizationConformancePack":{ + "name":"DeleteOrganizationConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOrganizationConformancePackRequest"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"ResourceInUseException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, "DeletePendingAggregationRequest":{ "name":"DeletePendingAggregationRequest", "http":{ @@ -321,6 +346,49 @@ {"shape":"NoSuchConfigurationRecorderException"} ] }, + "DescribeConformancePackCompliance":{ + "name":"DescribeConformancePackCompliance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConformancePackComplianceRequest"}, + "output":{"shape":"DescribeConformancePackComplianceResponse"}, + "errors":[ + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"NoSuchConfigRuleInConformancePackException"}, + {"shape":"NoSuchConformancePackException"} + ] + }, + "DescribeConformancePackStatus":{ + "name":"DescribeConformancePackStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConformancePackStatusRequest"}, + 
"output":{"shape":"DescribeConformancePackStatusResponse"}, + "errors":[ + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "DescribeConformancePacks":{ + "name":"DescribeConformancePacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConformancePacksRequest"}, + "output":{"shape":"DescribeConformancePacksResponse"}, + "errors":[ + {"shape":"NoSuchConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ] + }, "DescribeDeliveryChannelStatus":{ "name":"DescribeDeliveryChannelStatus", "http":{ @@ -375,6 +443,36 @@ {"shape":"OrganizationAccessDeniedException"} ] }, + "DescribeOrganizationConformancePackStatuses":{ + "name":"DescribeOrganizationConformancePackStatuses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrganizationConformancePackStatusesRequest"}, + "output":{"shape":"DescribeOrganizationConformancePackStatusesResponse"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, + "DescribeOrganizationConformancePacks":{ + "name":"DescribeOrganizationConformancePacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrganizationConformancePacksRequest"}, + "output":{"shape":"DescribeOrganizationConformancePacksResponse"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidLimitException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, "DescribePendingAggregationRequests":{ "name":"DescribePendingAggregationRequests", "http":{ @@ -544,6 +642,36 @@ {"shape":"InvalidParameterValueException"} ] }, + "GetConformancePackComplianceDetails":{ + "name":"GetConformancePackComplianceDetails", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"GetConformancePackComplianceDetailsRequest"}, + "output":{"shape":"GetConformancePackComplianceDetailsResponse"}, + "errors":[ + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoSuchConformancePackException"}, + {"shape":"NoSuchConfigRuleInConformancePackException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "GetConformancePackComplianceSummary":{ + "name":"GetConformancePackComplianceSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConformancePackComplianceSummaryRequest"}, + "output":{"shape":"GetConformancePackComplianceSummaryResponse"}, + "errors":[ + {"shape":"NoSuchConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ] + }, "GetDiscoveredResourceCounts":{ "name":"GetDiscoveredResourceCounts", "http":{ @@ -573,6 +701,21 @@ {"shape":"OrganizationAccessDeniedException"} ] }, + "GetOrganizationConformancePackDetailedStatus":{ + "name":"GetOrganizationConformancePackDetailedStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOrganizationConformancePackDetailedStatusRequest"}, + "output":{"shape":"GetOrganizationConformancePackDetailedStatusResponse"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, "GetResourceConfigHistory":{ "name":"GetResourceConfigHistory", "http":{ @@ -693,6 +836,22 @@ {"shape":"InvalidRecordingGroupException"} ] }, + "PutConformancePack":{ + "name":"PutConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutConformancePackRequest"}, + "output":{"shape":"PutConformancePackResponse"}, + "errors":[ + {"shape":"InsufficientPermissionsException"}, + {"shape":"ConformancePackTemplateValidationException"}, + {"shape":"ResourceInUseException"}, + 
{"shape":"InvalidParameterValueException"}, + {"shape":"MaxNumberOfConformancePacksExceededException"} + ] + }, "PutDeliveryChannel":{ "name":"PutDeliveryChannel", "http":{ @@ -743,6 +902,25 @@ {"shape":"InsufficientPermissionsException"} ] }, + "PutOrganizationConformancePack":{ + "name":"PutOrganizationConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutOrganizationConformancePackRequest"}, + "output":{"shape":"PutOrganizationConformancePackResponse"}, + "errors":[ + {"shape":"MaxNumberOfOrganizationConformancePacksExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ValidationException"}, + {"shape":"OrganizationAccessDeniedException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"OrganizationConformancePackTemplateValidationException"}, + {"shape":"OrganizationAllFeaturesNotEnabledException"}, + {"shape":"NoAvailableOrganizationException"} + ] + }, "PutRemediationConfigurations":{ "name":"PutRemediationConfigurations", "http":{ @@ -1015,6 +1193,11 @@ "max":256, "min":1 }, + "Annotation":{ + "type":"string", + "max":256, + "min":0 + }, "AutoRemediationAttemptSeconds":{ "type":"long", "box":true, @@ -1398,6 +1581,211 @@ "member":{"shape":"ConfigurationRecorderStatus"} }, "ConfigurationStateId":{"type":"string"}, + "ConformancePackArn":{ + "type":"string", + "max":2048, + "min":1 + }, + "ConformancePackComplianceFilters":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConformancePackConfigRuleNames"}, + "ComplianceType":{"shape":"ConformancePackComplianceType"} + } + }, + "ConformancePackComplianceResourceIds":{ + "type":"list", + "member":{"shape":"StringWithCharLimit256"}, + "max":5, + "min":0 + }, + "ConformancePackComplianceSummary":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackComplianceStatus" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + 
"ConformancePackComplianceStatus":{"shape":"ConformancePackComplianceType"} + } + }, + "ConformancePackComplianceSummaryList":{ + "type":"list", + "member":{"shape":"ConformancePackComplianceSummary"}, + "max":5, + "min":1 + }, + "ConformancePackComplianceType":{ + "type":"string", + "enum":[ + "COMPLIANT", + "NON_COMPLIANT" + ] + }, + "ConformancePackConfigRuleNames":{ + "type":"list", + "member":{"shape":"StringWithCharLimit64"}, + "max":10, + "min":0 + }, + "ConformancePackDetail":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackArn", + "ConformancePackId", + "DeliveryS3Bucket" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackArn":{"shape":"ConformancePackArn"}, + "ConformancePackId":{"shape":"ConformancePackId"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"}, + "LastUpdateRequestedTime":{"shape":"Date"}, + "CreatedBy":{"shape":"StringWithCharLimit256"} + } + }, + "ConformancePackDetailList":{ + "type":"list", + "member":{"shape":"ConformancePackDetail"}, + "max":25, + "min":0 + }, + "ConformancePackEvaluationFilters":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConformancePackConfigRuleNames"}, + "ComplianceType":{"shape":"ConformancePackComplianceType"}, + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceIds":{"shape":"ConformancePackComplianceResourceIds"} + } + }, + "ConformancePackEvaluationResult":{ + "type":"structure", + "required":[ + "ComplianceType", + "EvaluationResultIdentifier", + "ConfigRuleInvokedTime", + "ResultRecordedTime" + ], + "members":{ + "ComplianceType":{"shape":"ConformancePackComplianceType"}, + "EvaluationResultIdentifier":{"shape":"EvaluationResultIdentifier"}, + "ConfigRuleInvokedTime":{"shape":"Date"}, + "ResultRecordedTime":{"shape":"Date"}, + "Annotation":{"shape":"Annotation"} + 
} + }, + "ConformancePackId":{ + "type":"string", + "max":1024, + "min":1 + }, + "ConformancePackInputParameter":{ + "type":"structure", + "required":[ + "ParameterName", + "ParameterValue" + ], + "members":{ + "ParameterName":{"shape":"ParameterName"}, + "ParameterValue":{"shape":"ParameterValue"} + } + }, + "ConformancePackInputParameters":{ + "type":"list", + "member":{"shape":"ConformancePackInputParameter"}, + "max":60, + "min":0 + }, + "ConformancePackName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "ConformancePackNamesList":{ + "type":"list", + "member":{"shape":"ConformancePackName"}, + "max":25, + "min":0 + }, + "ConformancePackNamesToSummarizeList":{ + "type":"list", + "member":{"shape":"ConformancePackName"}, + "max":5, + "min":1 + }, + "ConformancePackRuleCompliance":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"ConfigRuleName"}, + "ComplianceType":{"shape":"ConformancePackComplianceType"} + } + }, + "ConformancePackRuleComplianceList":{ + "type":"list", + "member":{"shape":"ConformancePackRuleCompliance"}, + "max":1000, + "min":0 + }, + "ConformancePackRuleEvaluationResultsList":{ + "type":"list", + "member":{"shape":"ConformancePackEvaluationResult"}, + "max":100, + "min":0 + }, + "ConformancePackState":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED", + "DELETE_IN_PROGRESS", + "DELETE_FAILED" + ] + }, + "ConformancePackStatusDetail":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackId", + "ConformancePackArn", + "ConformancePackState", + "StackArn", + "LastUpdateRequestedTime" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackId":{"shape":"ConformancePackId"}, + "ConformancePackArn":{"shape":"ConformancePackArn"}, + "ConformancePackState":{"shape":"ConformancePackState"}, + "StackArn":{"shape":"StackArn"}, + 
"ConformancePackStatusReason":{"shape":"ConformancePackStatusReason"}, + "LastUpdateRequestedTime":{"shape":"Date"}, + "LastUpdateCompletedTime":{"shape":"Date"} + } + }, + "ConformancePackStatusDetailsList":{ + "type":"list", + "member":{"shape":"ConformancePackStatusDetail"}, + "max":25, + "min":0 + }, + "ConformancePackStatusReason":{ + "type":"string", + "max":2000, + "min":0 + }, + "ConformancePackTemplateValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "CosmosPageLimit":{ "type":"integer", "max":100, @@ -1436,6 +1824,13 @@ "ConfigurationRecorderName":{"shape":"RecorderName"} } }, + "DeleteConformancePackRequest":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"} + } + }, "DeleteDeliveryChannelRequest":{ "type":"structure", "required":["DeliveryChannelName"], @@ -1462,6 +1857,13 @@ "OrganizationConfigRuleName":{"shape":"OrganizationConfigRuleName"} } }, + "DeleteOrganizationConformancePackRequest":{ + "type":"structure", + "required":["OrganizationConformancePackName"], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"} + } + }, "DeletePendingAggregationRequestRequest":{ "type":"structure", "required":[ @@ -1554,6 +1956,16 @@ "type":"list", "member":{"shape":"DeliveryChannelStatus"} }, + "DeliveryS3Bucket":{ + "type":"string", + "max":63, + "min":3 + }, + "DeliveryS3KeyPrefix":{ + "type":"string", + "max":1024, + "min":1 + }, "DeliveryStatus":{ "type":"string", "enum":[ @@ -1710,6 +2122,63 @@ "ConfigurationRecorders":{"shape":"ConfigurationRecorderList"} } }, + "DescribeConformancePackComplianceLimit":{ + "type":"integer", + "max":1000, + "min":0 + }, + "DescribeConformancePackComplianceRequest":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "Filters":{"shape":"ConformancePackComplianceFilters"}, + 
"Limit":{"shape":"DescribeConformancePackComplianceLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePackComplianceResponse":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackRuleComplianceList" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackRuleComplianceList":{"shape":"ConformancePackRuleComplianceList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePackStatusRequest":{ + "type":"structure", + "members":{ + "ConformancePackNames":{"shape":"ConformancePackNamesList"}, + "Limit":{"shape":"PageSizeLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePackStatusResponse":{ + "type":"structure", + "members":{ + "ConformancePackStatusDetails":{"shape":"ConformancePackStatusDetailsList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePacksRequest":{ + "type":"structure", + "members":{ + "ConformancePackNames":{"shape":"ConformancePackNamesList"}, + "Limit":{"shape":"PageSizeLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePacksResponse":{ + "type":"structure", + "members":{ + "ConformancePackDetails":{"shape":"ConformancePackDetailList"}, + "NextToken":{"shape":"NextToken"} + } + }, "DescribeDeliveryChannelStatusRequest":{ "type":"structure", "members":{ @@ -1764,6 +2233,36 @@ "NextToken":{"shape":"String"} } }, + "DescribeOrganizationConformancePackStatusesRequest":{ + "type":"structure", + "members":{ + "OrganizationConformancePackNames":{"shape":"OrganizationConformancePackNames"}, + "Limit":{"shape":"CosmosPageLimit"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeOrganizationConformancePackStatusesResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePackStatuses":{"shape":"OrganizationConformancePackStatuses"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeOrganizationConformancePacksRequest":{ + "type":"structure", + "members":{ + 
"OrganizationConformancePackNames":{"shape":"OrganizationConformancePackNames"}, + "Limit":{"shape":"CosmosPageLimit"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeOrganizationConformancePacksResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePacks":{"shape":"OrganizationConformancePacks"}, + "NextToken":{"shape":"String"} + } + }, "DescribePendingAggregationRequestsLimit":{ "type":"integer", "max":20, @@ -2108,6 +2607,46 @@ "ComplianceSummariesByResourceType":{"shape":"ComplianceSummariesByResourceType"} } }, + "GetConformancePackComplianceDetailsLimit":{ + "type":"integer", + "max":100, + "min":0 + }, + "GetConformancePackComplianceDetailsRequest":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "Filters":{"shape":"ConformancePackEvaluationFilters"}, + "Limit":{"shape":"GetConformancePackComplianceDetailsLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetConformancePackComplianceDetailsResponse":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackRuleEvaluationResults":{"shape":"ConformancePackRuleEvaluationResultsList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetConformancePackComplianceSummaryRequest":{ + "type":"structure", + "required":["ConformancePackNames"], + "members":{ + "ConformancePackNames":{"shape":"ConformancePackNamesToSummarizeList"}, + "Limit":{"shape":"PageSizeLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetConformancePackComplianceSummaryResponse":{ + "type":"structure", + "members":{ + "ConformancePackComplianceSummaryList":{"shape":"ConformancePackComplianceSummaryList"}, + "NextToken":{"shape":"NextToken"} + } + }, "GetDiscoveredResourceCountsRequest":{ "type":"structure", "members":{ @@ -2141,6 +2680,23 @@ "NextToken":{"shape":"String"} } }, + 
"GetOrganizationConformancePackDetailedStatusRequest":{ + "type":"structure", + "required":["OrganizationConformancePackName"], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "Filters":{"shape":"OrganizationResourceDetailedStatusFilters"}, + "Limit":{"shape":"CosmosPageLimit"}, + "NextToken":{"shape":"String"} + } + }, + "GetOrganizationConformancePackDetailedStatusResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePackDetailedStatuses":{"shape":"OrganizationConformancePackDetailedStatuses"}, + "NextToken":{"shape":"String"} + } + }, "GetResourceConfigHistoryRequest":{ "type":"structure", "required":[ @@ -2357,6 +2913,12 @@ }, "exception":true }, + "MaxNumberOfConformancePacksExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MaxNumberOfDeliveryChannelsExceededException":{ "type":"structure", "members":{ @@ -2369,6 +2931,12 @@ }, "exception":true }, + "MaxNumberOfOrganizationConformancePacksExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MaxNumberOfRetentionConfigurationsExceededException":{ "type":"structure", "members":{ @@ -2391,12 +2959,12 @@ "CREATE_SUCCESSFUL", "CREATE_IN_PROGRESS", "CREATE_FAILED", - "UPDATE_SUCCESSFUL", - "UPDATE_FAILED", - "UPDATE_IN_PROGRESS", "DELETE_SUCCESSFUL", "DELETE_FAILED", - "DELETE_IN_PROGRESS" + "DELETE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" ] }, "MemberAccountStatus":{ @@ -2462,6 +3030,12 @@ }, "exception":true }, + "NoSuchConfigRuleInConformancePackException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "NoSuchConfigurationAggregatorException":{ "type":"structure", "members":{ @@ -2474,6 +3048,12 @@ }, "exception":true }, + "NoSuchConformancePackException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "NoSuchDeliveryChannelException":{ "type":"structure", "members":{ @@ -2486,6 +3066,12 @@ }, 
"exception":true }, + "NoSuchOrganizationConformancePackException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "NoSuchRemediationConfigurationException":{ "type":"structure", "members":{ @@ -2591,6 +3177,84 @@ "type":"list", "member":{"shape":"OrganizationConfigRule"} }, + "OrganizationConformancePack":{ + "type":"structure", + "required":[ + "OrganizationConformancePackName", + "OrganizationConformancePackArn", + "DeliveryS3Bucket", + "LastUpdateTime" + ], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "OrganizationConformancePackArn":{"shape":"StringWithCharLimit256"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"}, + "ExcludedAccounts":{"shape":"ExcludedAccounts"}, + "LastUpdateTime":{"shape":"Date"} + } + }, + "OrganizationConformancePackDetailedStatus":{ + "type":"structure", + "required":[ + "AccountId", + "ConformancePackName", + "Status" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "ConformancePackName":{"shape":"StringWithCharLimit256"}, + "Status":{"shape":"OrganizationResourceDetailedStatus"}, + "ErrorCode":{"shape":"String"}, + "ErrorMessage":{"shape":"String"}, + "LastUpdateTime":{"shape":"Date"} + } + }, + "OrganizationConformancePackDetailedStatuses":{ + "type":"list", + "member":{"shape":"OrganizationConformancePackDetailedStatus"} + }, + "OrganizationConformancePackName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "OrganizationConformancePackNames":{ + "type":"list", + "member":{"shape":"OrganizationConformancePackName"}, + "max":25, + "min":0 + }, + "OrganizationConformancePackStatus":{ + "type":"structure", + "required":[ + "OrganizationConformancePackName", + "Status" + ], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + 
"Status":{"shape":"OrganizationResourceStatus"}, + "ErrorCode":{"shape":"String"}, + "ErrorMessage":{"shape":"String"}, + "LastUpdateTime":{"shape":"Date"} + } + }, + "OrganizationConformancePackStatuses":{ + "type":"list", + "member":{"shape":"OrganizationConformancePackStatus"} + }, + "OrganizationConformancePackTemplateValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OrganizationConformancePacks":{ + "type":"list", + "member":{"shape":"OrganizationConformancePack"} + }, "OrganizationCustomRuleMetadata":{ "type":"structure", "required":[ @@ -2623,18 +3287,53 @@ "TagValueScope":{"shape":"StringWithCharLimit256"} } }, - "OrganizationRuleStatus":{ + "OrganizationResourceDetailedStatus":{ "type":"string", "enum":[ "CREATE_SUCCESSFUL", "CREATE_IN_PROGRESS", "CREATE_FAILED", + "DELETE_SUCCESSFUL", + "DELETE_FAILED", + "DELETE_IN_PROGRESS", "UPDATE_SUCCESSFUL", - "UPDATE_FAILED", "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "OrganizationResourceDetailedStatusFilters":{ + "type":"structure", + "members":{ + "AccountId":{"shape":"AccountId"}, + "Status":{"shape":"OrganizationResourceDetailedStatus"} + } + }, + "OrganizationResourceStatus":{ + "type":"string", + "enum":[ + "CREATE_SUCCESSFUL", + "CREATE_IN_PROGRESS", + "CREATE_FAILED", "DELETE_SUCCESSFUL", "DELETE_FAILED", - "DELETE_IN_PROGRESS" + "DELETE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "OrganizationRuleStatus":{ + "type":"string", + "enum":[ + "CREATE_SUCCESSFUL", + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "DELETE_SUCCESSFUL", + "DELETE_FAILED", + "DELETE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" ] }, "OversizedConfigurationItemException":{ @@ -2650,6 +3349,21 @@ "AWS" ] }, + "PageSizeLimit":{ + "type":"integer", + "max":20, + "min":0 + }, + "ParameterName":{ + "type":"string", + "max":255, + "min":0 + }, + "ParameterValue":{ + "type":"string", + "max":4096, + "min":0 + }, 
"PendingAggregationRequest":{ "type":"structure", "members":{ @@ -2716,6 +3430,27 @@ "ConfigurationRecorder":{"shape":"ConfigurationRecorder"} } }, + "PutConformancePackRequest":{ + "type":"structure", + "required":[ + "ConformancePackName", + "DeliveryS3Bucket" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "TemplateS3Uri":{"shape":"TemplateS3Uri"}, + "TemplateBody":{"shape":"TemplateBody"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"} + } + }, + "PutConformancePackResponse":{ + "type":"structure", + "members":{ + "ConformancePackArn":{"shape":"ConformancePackArn"} + } + }, "PutDeliveryChannelRequest":{ "type":"structure", "required":["DeliveryChannel"], @@ -2754,6 +3489,28 @@ "OrganizationConfigRuleArn":{"shape":"StringWithCharLimit256"} } }, + "PutOrganizationConformancePackRequest":{ + "type":"structure", + "required":[ + "OrganizationConformancePackName", + "DeliveryS3Bucket" + ], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "TemplateS3Uri":{"shape":"TemplateS3Uri"}, + "TemplateBody":{"shape":"TemplateBody"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"}, + "ExcludedAccounts":{"shape":"ExcludedAccounts"} + } + }, + "PutOrganizationConformancePackResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePackArn":{"shape":"StringWithCharLimit256"} + } + }, "PutRemediationConfigurationsRequest":{ "type":"structure", "required":["RemediationConfigurations"], @@ -3306,6 +4063,11 @@ "ErrorPercentage":{"shape":"Percentage"} } }, + "StackArn":{ + "type":"string", + "max":2048, + "min":1 + }, "StartConfigRulesEvaluationRequest":{ "type":"structure", "members":{ @@ -3463,6 +4225,17 @@ "max":50, 
"min":0 }, + "TemplateBody":{ + "type":"string", + "max":51200, + "min":1 + }, + "TemplateS3Uri":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"s3://.*" + }, "TooManyTagsException":{ "type":"structure", "members":{ diff --git a/models/apis/config/2014-11-12/docs-2.json b/models/apis/config/2014-11-12/docs-2.json index 74426c39318..8017af42247 100644 --- a/models/apis/config/2014-11-12/docs-2.json +++ b/models/apis/config/2014-11-12/docs-2.json @@ -8,9 +8,11 @@ "DeleteConfigRule": "Deletes the specified AWS Config rule and all of its evaluation results.
AWS Config sets the state of a rule to DELETING
until the deletion is complete. You cannot update a rule while it is in this state. If you make a PutConfigRule
or DeleteConfigRule
request for the rule, you will receive a ResourceInUseException
.
You can check the state of a rule by using the DescribeConfigRules
request.
Deletes the specified configuration aggregator and the aggregated data associated with the aggregator.
", "DeleteConfigurationRecorder": "Deletes the configuration recorder.
After the configuration recorder is deleted, AWS Config will not record resource configuration changes until you create a new configuration recorder.
This action does not delete the configuration information that was previously recorded. You will be able to access the previously recorded information by using the GetResourceConfigHistory
action, but you will not be able to access this information in the AWS Config console until you create a new configuration recorder.
Deletes the specified conformance pack and all the AWS Config rules and all evaluation results within that conformance pack.
AWS Config sets the conformance pack to DELETE_IN_PROGRESS
until the deletion is complete. You cannot update a conformance pack while it is in this state.
Deletes the delivery channel.
Before you can delete the delivery channel, you must stop the configuration recorder by using the StopConfigurationRecorder action.
", "DeleteEvaluationResults": "Deletes the evaluation results for the specified AWS Config rule. You can specify one AWS Config rule per request. After you delete the evaluation results, you can call the StartConfigRulesEvaluation API to start evaluating your AWS resources against the rule.
", "DeleteOrganizationConfigRule": "Deletes the specified organization config rule and all of its evaluation results from all member accounts in that organization. Only a master account can delete an organization config rule.
AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a rule while it is in this state.
", + "DeleteOrganizationConformancePack": "Deletes the specified organization conformance pack and all of the config rules and remediation actions from all member accounts in that organization. Only a master account can delete an organization conformance pack.
AWS Config sets the state of a conformance pack to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a conformance pack while it is in this state.
", "DeletePendingAggregationRequest": "Deletes pending authorization requests for a specified aggregator account in a specified region.
", "DeleteRemediationConfiguration": "Deletes the remediation configuration.
", "DeleteRemediationExceptions": "Deletes one or more remediation exceptions mentioned in the resource keys.
", @@ -26,10 +28,15 @@ "DescribeConfigurationAggregators": "Returns the details of one or more configuration aggregators. If the configuration aggregator is not specified, this action returns the details for all the configuration aggregators associated with the account.
", "DescribeConfigurationRecorderStatus": "Returns the current status of the specified configuration recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account.
Currently, you can specify only one configuration recorder per region in your account.
Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account.
Currently, you can specify only one configuration recorder per region in your account.
Returns compliance information for each rule in that conformance pack.
You must provide exact rule names otherwise AWS Config cannot return evaluation results due to insufficient data.
Provides one or more conformance packs deployment status.
", + "DescribeConformancePacks": "Returns a list of one or more conformance packs.
", "DescribeDeliveryChannelStatus": "Returns the current status of the specified delivery channel. If a delivery channel is not specified, this action returns the current status of all delivery channels associated with the account.
Currently, you can specify only one delivery channel per region in your account.
Returns details about the specified delivery channel. If a delivery channel is not specified, this action returns the details of all delivery channels associated with the account.
Currently, you can specify only one delivery channel per region in your account.
Provides organization config rule deployment status for an organization.
The status is not considered successful until organization config rule is successfully deployed in all the member accounts with an exception of excluded accounts.
When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names. It is only applicable, when you request all the organization config rules.
Only a master account can call this API.
Returns a list of organization config rules.
When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names. It is only applicable, when you request all the organization config rules.
Only a master account can call this API.
Provides organization conformance pack deployment status for an organization.
The status is not considered successful until organization conformance pack is successfully deployed in all the member accounts with an exception of excluded accounts.
When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization conformance pack names. They are only applicable, when you request all the organization conformance packs.
Only a master account can call this API.
Returns a list of organization conformance packs.
When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization conformance pack names. They are only applicable, when you request all the organization conformance packs. Only a master account can call this API.
Returns a list of all pending aggregation requests.
", "DescribeRemediationConfigurations": "Returns the details of one or more remediation configurations.
", "DescribeRemediationExceptions": "Returns the details of one or more remediation exceptions. A detailed view of a remediation exception for a set of resources that includes an explanation of an exception and the time when the exception will be deleted. When you specify the limit and the next token, you receive a paginated response.
When you specify the limit and the next token, you receive a paginated response.
Limit and next token are not applicable if you request resources in batch. It is only applicable, when you request all resources.
Returns the evaluation results for the specified AWS resource. The results indicate which AWS Config rules were used to evaluate the resource, when each rule was last used, and whether the resource complies with each rule.
", "GetComplianceSummaryByConfigRule": "Returns the number of AWS Config rules that are compliant and noncompliant, up to a maximum of 25 for each.
", "GetComplianceSummaryByResourceType": "Returns the number of resources that are compliant and the number that are noncompliant. You can specify one or more resource types to get these numbers for each resource type. The maximum number returned is 100.
", + "GetConformancePackComplianceDetails": "Returns compliance details of a conformance pack for all AWS resources that are monitered by conformance pack.
", + "GetConformancePackComplianceSummary": null, "GetDiscoveredResourceCounts": "Returns the resource types, the number of each resource type, and the total number of resources that AWS Config is recording in this region for your AWS account.
Example
AWS Config is recording three resource types in the US East (Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets.
You make a call to the GetDiscoveredResourceCounts
action and specify that you want all resource types.
AWS Config returns the following:
The resource types (EC2 instances, IAM users, and S3 buckets).
The number of each resource type (25, 20, and 15).
The total number of all resources (60).
The response is paginated. By default, AWS Config lists 100 ResourceCount objects on each page. You can customize this number with the limit
parameter. The response includes a nextToken
string. To get the next page of results, run the request again and specify the string for the nextToken
parameter.
If you make a call to the GetDiscoveredResourceCounts action, you might not immediately receive resource counts in the following situations:
You are a new AWS Config customer.
You just enabled resource recording.
It might take a few minutes for AWS Config to record and count your resources. Wait a few minutes and then retry the GetDiscoveredResourceCounts action.
Returns detailed status for each member account within an organization for a given organization config rule.
Only a master account can call this API.
Returns detailed status for each member account within an organization for a given organization conformance pack.
Only a master account can call this API.
", "GetResourceConfigHistory": "Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems
between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config returns the ConfigurationItems
for the specified retention period.
The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit
parameter. The response includes a nextToken
string. To get the next page of results, run the request again and specify the string for the nextToken
parameter.
Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit
. In such cases, you can make another call, using the nextToken
.
Accepts a resource type and returns a list of resource identifiers that are aggregated for a specific resource type across accounts and regions. A resource identifier includes the resource type, ID, (if available) the custom resource name, source account, and source region. You can narrow the results to include only resources that have specific resource IDs, or a resource name, or source account ID, or source region.
For example, if the input consists of accountID 12345678910 and the region is us-east-1 for resource type AWS::EC2::Instance
then the API returns all the EC2 instance identifiers of accountID 12345678910 and region us-east-1.
Accepts a resource type and returns a list of resource identifiers for the resources of that type. A resource identifier includes the resource type, ID, and (if available) the custom resource name. The results consist of resources that AWS Config has discovered, including those that AWS Config is not currently recording. You can narrow the results to include only resources that have specific resource IDs or a resource name.
You can specify either resource IDs or a resource name, but not both, in the same request.
The response is paginated. By default, AWS Config lists 100 resource identifiers on each page. You can customize this number with the limit
parameter. The response includes a nextToken
string. To get the next page of results, run the request again and specify the string for the nextToken
parameter.
Adds or updates an AWS Config rule for evaluating whether your AWS resources comply with your desired configurations.
You can use this action for custom AWS Config rules and AWS managed Config rules. A custom AWS Config rule is a rule that you develop and maintain. An AWS managed Config rule is a customizable, predefined rule that AWS Config provides.
If you are adding a new custom AWS Config rule, you must first create the AWS Lambda function that the rule invokes to evaluate your resources. When you use the PutConfigRule
action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. Specify the ARN for the SourceIdentifier
key. This key is part of the Source
object, which is part of the ConfigRule
object.
If you are adding an AWS managed Config rule, specify the rule's identifier for the SourceIdentifier
key. To reference AWS managed Config rule identifiers, see About AWS Managed Config Rules.
For any new rule that you add, specify the ConfigRuleName
in the ConfigRule
object. Do not specify the ConfigRuleArn
or the ConfigRuleId
. These values are generated by AWS Config for new rules.
If you are updating a rule that you added previously, you can specify the rule by ConfigRuleName
, ConfigRuleId
, or ConfigRuleArn
in the ConfigRule
data type that you use in this request.
The maximum number of rules that AWS Config supports is 150.
For information about requesting a rule limit increase, see AWS Config Limits in the AWS General Reference Guide.
For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.
", "PutConfigurationAggregator": "Creates and updates the configuration aggregator with the selected source accounts and regions. The source account can be individual account(s) or an organization.
AWS Config should be enabled in source accounts and regions you want to aggregate.
If your source type is an organization, you must be signed in to the master account and all features must be enabled in your organization. AWS Config calls EnableAwsServiceAccess
API to enable integration between AWS Config and AWS Organizations.
Creates a new configuration recorder to record the selected resource configurations.
You can use this action to change the role roleARN
or the recordingGroup
of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role.
Currently, you can specify only one configuration recorder per region in your account.
If ConfigurationRecorder
does not have the recordingGroup parameter specified, the default is to record all supported resource types.
Creates or updates a conformance pack. A conformance pack is a collection of AWS Config rules that can be easily deployed in an account and a region.
This API creates a service linked role AWSServiceRoleForConfigConforms
in your account. The service linked role is created only when the role does not exist in your account. AWS Config verifies the existence of role with GetRole
action.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. If you provide both AWS Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Creates a delivery channel object to deliver configuration information to an Amazon S3 bucket and Amazon SNS topic.
Before you can create a delivery channel, you must create a configuration recorder.
You can use this action to change the Amazon S3 bucket or an Amazon SNS topic of the existing delivery channel. To change the Amazon S3 bucket or an Amazon SNS topic, call this action and specify the changed values for the S3 bucket and the SNS topic. If you specify a different value for either the S3 bucket or the SNS topic, this action will keep the existing value for the parameter that is not changed.
You can have only one delivery channel per region in your account.
Used by an AWS Lambda function to deliver evaluation results to AWS Config. This action is required in every AWS Lambda function that is invoked by an AWS Config rule.
", "PutOrganizationConfigRule": "Adds or updates organization config rule for your entire organization evaluating whether your AWS resources comply with your desired configurations. Only a master account can create or update an organization config rule.
This API enables organization service access through the EnableAWSServiceAccess
action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup
in the master account of your organization. The service linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of role with GetRole
action.
You can use this action to create both custom AWS Config rules and AWS managed Config rules. If you are adding a new custom AWS Config rule, you must first create AWS Lambda function in the master account that the rule invokes to evaluate your resources. When you use the PutOrganizationConfigRule
action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed Config rule, specify the rule's identifier for the RuleIdentifier
key.
The maximum number of organization config rules that AWS Config supports is 150.
Specify either OrganizationCustomRuleMetadata
or OrganizationManagedRuleMetadata
.
Deploys conformance packs across member accounts in an AWS Organization.
This API enables organization service access through the EnableAWSServiceAccess
action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup in the master account of your organization. The service linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of role with GetRole action.
The SPN is config-multiaccountsetup.amazonaws.com
.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. If you provide both AWS Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Adds or updates the remediation configuration with a specific AWS Config rule with the selected target or action. The API creates the RemediationConfiguration
object for the AWS Config rule. The AWS Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.
A remediation exception is when a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific AWS Config rule.
", "PutRetentionConfiguration": "Creates and updates the retention configuration with details about retention period (number of days) that AWS Config stores your historical information. The API creates the RetentionConfiguration
object and names the object as default. When you have a RetentionConfiguration
object named default, calling the API modifies the default object.
Currently, AWS Config supports only one retention configuration per region in your account.
The 12-digit account ID of the source account.
", "MemberAccountStatus$AccountId": "The 12-digit account ID of a member account.
", + "OrganizationConformancePackDetailedStatus$AccountId": "The 12-digit account ID of a member account.
", + "OrganizationResourceDetailedStatusFilters$AccountId": "The 12-digit account ID of the member account within an organization.
", "PendingAggregationRequest$RequesterAccountId": "The 12-digit account ID of the account requesting to aggregate data.
", "PutAggregationAuthorizationRequest$AuthorizedAccountId": "The 12-digit account ID of the account authorized to aggregate data.
", "ResourceCountFilters$AccountId": "The 12-digit ID of the account.
", @@ -228,6 +242,12 @@ "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are ConfigRule
, ConfigurationAggregator
and AggregatorAuthorization
.
Supplementary information about how the evaluation determined the compliance.
" + } + }, "AutoRemediationAttemptSeconds": { "base": null, "refs": { @@ -477,6 +497,7 @@ "AggregateComplianceByConfigRule$ConfigRuleName": "The name of the AWS Config rule.
", "ConfigRuleComplianceFilters$ConfigRuleName": "The name of the AWS Config rule.
", "ConfigRuleNames$member": null, + "ConformancePackRuleCompliance$ConfigRuleName": "Filters the results by AWS Config rule name.
", "DeleteRemediationConfigurationRequest$ConfigRuleName": "The name of the AWS Config rule for which you want to delete remediation configuration.
", "DeleteRemediationExceptionsRequest$ConfigRuleName": "The name of the AWS Config rule for which you want to delete remediation exception configuration.
", "DescribeRemediationExceptionsRequest$ConfigRuleName": "The name of the AWS Config rule.
", @@ -642,12 +663,186 @@ "ConfigurationItem$configurationStateId": "An identifier that indicates the ordering of the configuration items of a resource.
" } }, + "ConformancePackArn": { + "base": null, + "refs": { + "ConformancePackDetail$ConformancePackArn": "Amazon Resource Name (ARN) of the conformance pack.
", + "ConformancePackStatusDetail$ConformancePackArn": "Amazon Resource Name (ARN) of comformance pack.
", + "PutConformancePackResponse$ConformancePackArn": "ARN of the conformance pack.
" + } + }, + "ConformancePackComplianceFilters": { + "base": "Filters the conformance pack by compliance types and AWS Config rule names.
", + "refs": { + "DescribeConformancePackComplianceRequest$Filters": "A ConformancePackComplianceFilters
object.
Filters the results by resource IDs.
" + } + }, + "ConformancePackComplianceSummary": { + "base": null, + "refs": { + "ConformancePackComplianceSummaryList$member": null + } + }, + "ConformancePackComplianceSummaryList": { + "base": null, + "refs": { + "GetConformancePackComplianceSummaryResponse$ConformancePackComplianceSummaryList": null + } + }, + "ConformancePackComplianceType": { + "base": null, + "refs": { + "ConformancePackComplianceFilters$ComplianceType": "Filters the results by compliance.
The allowed values are COMPLIANT
and NON_COMPLIANT
.
Filters the results by compliance.
The allowed values are COMPLIANT
and NON_COMPLIANT
.
Filters the results by compliance.
The allowed values are COMPLIANT
and NON_COMPLIANT
.
Filters the results by compliance.
The allowed values are COMPLIANT
and NON_COMPLIANT
.
Filters the results by AWS Config rule names.
", + "ConformancePackEvaluationFilters$ConfigRuleNames": "Filters the results by AWS Config rule names.
" + } + }, + "ConformancePackDetail": { + "base": "Returns details of a conformance pack. A conformance pack is a collection of AWS Config rules that can be easily deployed in an account and a region.
", + "refs": { + "ConformancePackDetailList$member": null + } + }, + "ConformancePackDetailList": { + "base": null, + "refs": { + "DescribeConformancePacksResponse$ConformancePackDetails": "Returns a list of ConformancePackDetail
objects.
Filters a conformance pack by AWS Config rule names, compliance types, AWS resource types, and resource IDs.
", + "refs": { + "GetConformancePackComplianceDetailsRequest$Filters": "A ConformancePackEvaluationFilters
object.
The details of a conformance pack evaluation. Provides AWS Config rule and AWS resource type that was evaluated, the compliance of the conformance pack, related time stamps, and supplementary information.
", + "refs": { + "ConformancePackRuleEvaluationResultsList$member": null + } + }, + "ConformancePackId": { + "base": null, + "refs": { + "ConformancePackDetail$ConformancePackId": "ID of the conformance pack.
", + "ConformancePackStatusDetail$ConformancePackId": "ID of the conformance pack.
" + } + }, + "ConformancePackInputParameter": { + "base": "Input parameters in the form of key-value pairs for the conformance pack, both of which you define. Keys can have a maximum character length of 128 characters, and values can have a maximum length of 256 characters.
", + "refs": { + "ConformancePackInputParameters$member": null + } + }, + "ConformancePackInputParameters": { + "base": null, + "refs": { + "ConformancePackDetail$ConformancePackInputParameters": "A list of ConformancePackInputParameter
objects.
A list of ConformancePackInputParameter
objects.
A list of ConformancePackInputParameter
objects.
A list of ConformancePackInputParameter
objects.
Name of the conformance pack.
", + "ConformancePackNamesList$member": null, + "ConformancePackNamesToSummarizeList$member": null, + "ConformancePackStatusDetail$ConformancePackName": "Name of the conformance pack.
", + "DeleteConformancePackRequest$ConformancePackName": "Name of the conformance pack you want to delete.
", + "DescribeConformancePackComplianceRequest$ConformancePackName": "Name of the conformance pack.
", + "DescribeConformancePackComplianceResponse$ConformancePackName": "Name of the conformance pack.
", + "GetConformancePackComplianceDetailsRequest$ConformancePackName": "Name of the conformance pack.
", + "GetConformancePackComplianceDetailsResponse$ConformancePackName": "Name of the conformance pack.
", + "PutConformancePackRequest$ConformancePackName": "Name of the conformance pack you want to create.
" + } + }, + "ConformancePackNamesList": { + "base": null, + "refs": { + "DescribeConformancePackStatusRequest$ConformancePackNames": "Comma-separated list of conformance pack names.
", + "DescribeConformancePacksRequest$ConformancePackNames": "Comma-separated list of conformance pack names for which you want details. If you do not specify any names, AWS Config returns details for all your conformance packs.
" + } + }, + "ConformancePackNamesToSummarizeList": { + "base": null, + "refs": { + "GetConformancePackComplianceSummaryRequest$ConformancePackNames": null + } + }, + "ConformancePackRuleCompliance": { + "base": "Compliance information of one or more AWS Config rules within a conformance pack. You can filter using AWS Config rule names and compliance types.
", + "refs": { + "ConformancePackRuleComplianceList$member": null + } + }, + "ConformancePackRuleComplianceList": { + "base": null, + "refs": { + "DescribeConformancePackComplianceResponse$ConformancePackRuleComplianceList": "Returns a list of ConformancePackRuleCompliance
objects.
Returns a list of ConformancePackEvaluationResult
objects.
Indicates deployment status of conformance pack.
AWS Config sets the state of the conformance pack to:
CREATE_IN_PROGRESS when a conformance pack creation is in progress for an account.
CREATE_COMPLETE when a conformance pack has been successfully created in your account.
CREATE_FAILED when a conformance pack creation failed in your account.
DELETE_IN_PROGRESS when a conformance pack deletion is in progress.
DELETE_FAILED when a conformance pack deletion failed from your account.
Status details of a conformance pack.
", + "refs": { + "ConformancePackStatusDetailsList$member": null + } + }, + "ConformancePackStatusDetailsList": { + "base": null, + "refs": { + "DescribeConformancePackStatusResponse$ConformancePackStatusDetails": "A list of ConformancePackStatusDetail
objects.
The reason of conformance pack creation failure.
" + } + }, + "ConformancePackTemplateValidationException": { + "base": "You have specified a template that is not valid or supported.
", + "refs": { + } + }, "CosmosPageLimit": { "base": null, "refs": { "DescribeOrganizationConfigRuleStatusesRequest$Limit": "The maximum number of OrganizationConfigRuleStatuses
returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.
The maximum number of organization config rules returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.
", - "GetOrganizationConfigRuleDetailedStatusRequest$Limit": "The maximum number of OrganizationConfigRuleDetailedStatus
returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.
The maximum number of OrganizationConformancePackStatuses returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.
", + "DescribeOrganizationConformancePacksRequest$Limit": "The maximum number of organization config packs returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.
", + "GetOrganizationConfigRuleDetailedStatusRequest$Limit": "The maximum number of OrganizationConfigRuleDetailedStatus
returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.
The maximum number of OrganizationConformancePackDetailedStatuses
returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.
The time the recorder was last started.
", "ConfigurationRecorderStatus$lastStopTime": "The time the recorder was last stopped.
", "ConfigurationRecorderStatus$lastStatusChangeTime": "The time when the status was last changed.
", + "ConformancePackDetail$LastUpdateRequestedTime": "Last time when conformation pack update was requested.
", + "ConformancePackEvaluationResult$ConfigRuleInvokedTime": "The time when AWS Config rule evaluated AWS resource.
", + "ConformancePackEvaluationResult$ResultRecordedTime": "The time when AWS Config recorded the evaluation result.
", + "ConformancePackStatusDetail$LastUpdateRequestedTime": "Last time when conformation pack creation and update was requested.
", + "ConformancePackStatusDetail$LastUpdateCompletedTime": "Last time when conformation pack creation and update was successful.
", "EvaluationResult$ResultRecordedTime": "The time when AWS Config recorded the evaluation result.
", "EvaluationResult$ConfigRuleInvokedTime": "The time when the AWS Config rule evaluated the AWS resource.
", "EvaluationResultIdentifier$OrderingTimestamp": "The time of the event that triggered the evaluation of your AWS resources. The time can indicate when AWS Config delivered a configuration item change notification, or it can indicate when AWS Config delivered the configuration snapshot, depending on which event triggered the evaluation.
", "MemberAccountStatus$LastUpdateTime": "The timestamp of the last status update.
", "OrganizationConfigRule$LastUpdateTime": "The timestamp of the last update.
", "OrganizationConfigRuleStatus$LastUpdateTime": "The timestamp of the last update.
", + "OrganizationConformancePack$LastUpdateTime": "Last time when organization conformation pack was updated.
", + "OrganizationConformancePackDetailedStatus$LastUpdateTime": "The timestamp of the last status update.
", + "OrganizationConformancePackStatus$LastUpdateTime": "The timestamp of the last update.
", "PutRemediationExceptionsRequest$ExpirationTime": "The exception is automatically deleted after the expiration date.
", "RemediationException$ExpirationTime": "The time when the remediation exception will be deleted.
", "RemediationExecutionStatus$InvocationTime": "Start time when the remediation was executed.
", @@ -706,6 +909,11 @@ "refs": { } }, + "DeleteConformancePackRequest": { + "base": null, + "refs": { + } + }, "DeleteDeliveryChannelRequest": { "base": "The input for the DeleteDeliveryChannel action. The action accepts the following data, in JSON format.
", "refs": { @@ -726,6 +934,11 @@ "refs": { } }, + "DeleteOrganizationConformancePackRequest": { + "base": null, + "refs": { + } + }, "DeletePendingAggregationRequestRequest": { "base": null, "refs": { @@ -798,6 +1011,24 @@ "DescribeDeliveryChannelStatusResponse$DeliveryChannelsStatus": "A list that contains the status of a specified delivery channel.
" } }, + "DeliveryS3Bucket": { + "base": null, + "refs": { + "ConformancePackDetail$DeliveryS3Bucket": "Location of an Amazon S3 bucket where AWS Config can deliver evaluation results and conformance pack template that is used to create a pack.
", + "OrganizationConformancePack$DeliveryS3Bucket": "Location of an Amazon S3 bucket where AWS Config can deliver evaluation results and conformance pack template that is used to create a pack.
", + "PutConformancePackRequest$DeliveryS3Bucket": "Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. AWS Config stores intermediate files while processing conformance pack template.
", + "PutOrganizationConformancePackRequest$DeliveryS3Bucket": "Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. AWS Config stores intermediate files while processing conformance pack template.
" + } + }, + "DeliveryS3KeyPrefix": { + "base": null, + "refs": { + "ConformancePackDetail$DeliveryS3KeyPrefix": "Any folder structure you want to add to an Amazon S3 bucket.
", + "OrganizationConformancePack$DeliveryS3KeyPrefix": "Any folder structure you want to add to an Amazon S3 bucket.
", + "PutConformancePackRequest$DeliveryS3KeyPrefix": "The prefix for the Amazon S3 bucket.
", + "PutOrganizationConformancePackRequest$DeliveryS3KeyPrefix": "The prefix for the Amazon S3 bucket.
" + } + }, "DeliveryStatus": { "base": null, "refs": { @@ -905,6 +1136,42 @@ "refs": { } }, + "DescribeConformancePackComplianceLimit": { + "base": null, + "refs": { + "DescribeConformancePackComplianceRequest$Limit": "The maximum number of AWS Config rules within a conformance pack are returned on each page.
" + } + }, + "DescribeConformancePackComplianceRequest": { + "base": null, + "refs": { + } + }, + "DescribeConformancePackComplianceResponse": { + "base": null, + "refs": { + } + }, + "DescribeConformancePackStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeConformancePackStatusResponse": { + "base": null, + "refs": { + } + }, + "DescribeConformancePacksRequest": { + "base": null, + "refs": { + } + }, + "DescribeConformancePacksResponse": { + "base": null, + "refs": { + } + }, "DescribeDeliveryChannelStatusRequest": { "base": "The input for the DeliveryChannelStatus action.
", "refs": { @@ -945,6 +1212,26 @@ "refs": { } }, + "DescribeOrganizationConformancePackStatusesRequest": { + "base": null, + "refs": { + } + }, + "DescribeOrganizationConformancePackStatusesResponse": { + "base": null, + "refs": { + } + }, + "DescribeOrganizationConformancePacksRequest": { + "base": null, + "refs": { + } + }, + "DescribeOrganizationConformancePacksResponse": { + "base": null, + "refs": { + } + }, "DescribePendingAggregationRequestsLimit": { "base": null, "refs": { @@ -1035,6 +1322,7 @@ "base": "Uniquely identifies an evaluation result.
", "refs": { "AggregateEvaluationResult$EvaluationResultIdentifier": "Uniquely identifies the evaluation result.
", + "ConformancePackEvaluationResult$EvaluationResultIdentifier": null, "EvaluationResult$EvaluationResultIdentifier": "Uniquely identifies the evaluation result.
" } }, @@ -1068,7 +1356,9 @@ "base": null, "refs": { "OrganizationConfigRule$ExcludedAccounts": "A comma-separated list of accounts excluded from organization config rule.
", - "PutOrganizationConfigRuleRequest$ExcludedAccounts": "A comma-separated list of accounts that you want to exclude from an organization config rule.
" + "OrganizationConformancePack$ExcludedAccounts": "A comma-separated list of accounts excluded from organization conformance pack.
", + "PutOrganizationConfigRuleRequest$ExcludedAccounts": "A comma-separated list of accounts that you want to exclude from an organization config rule.
", + "PutOrganizationConformancePackRequest$ExcludedAccounts": "A list of AWS accounts to be excluded from an organization conformance pack while deploying a conformance pack.
" } }, "ExecutionControls": { @@ -1212,6 +1502,32 @@ "refs": { } }, + "GetConformancePackComplianceDetailsLimit": { + "base": null, + "refs": { + "GetConformancePackComplianceDetailsRequest$Limit": "The maximum number of evaluation results returned on each page. If you do no specify a number, AWS Config uses the default. The default is 100.
" + } + }, + "GetConformancePackComplianceDetailsRequest": { + "base": null, + "refs": { + } + }, + "GetConformancePackComplianceDetailsResponse": { + "base": null, + "refs": { + } + }, + "GetConformancePackComplianceSummaryRequest": { + "base": null, + "refs": { + } + }, + "GetConformancePackComplianceSummaryResponse": { + "base": null, + "refs": { + } + }, "GetDiscoveredResourceCountsRequest": { "base": null, "refs": { @@ -1232,6 +1548,16 @@ "refs": { } }, + "GetOrganizationConformancePackDetailedStatusRequest": { + "base": null, + "refs": { + } + }, + "GetOrganizationConformancePackDetailedStatusResponse": { + "base": null, + "refs": { + } + }, "GetResourceConfigHistoryRequest": { "base": "The input for the GetResourceConfigHistory action.
", "refs": { @@ -1274,7 +1600,7 @@ } }, "InsufficientPermissionsException": { - "base": "Indicates one of the following errors:
For PutConfigRule, the rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.
For PutConfigRule, the AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.
For OrganizationConfigRule, organization config rule cannot be created because you do not have permissions to call IAM GetRole
action or create service linked role.
Indicates one of the following errors:
For PutConfigRule, the rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.
For PutConfigRule, the AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.
For PutOrganizationConfigRule, organization config rule cannot be created because you do not have permissions to call IAM GetRole
action or create a service linked role.
For PutConformancePack and PutOrganizationConformancePack, a conformance pack cannot be created becuase you do not have permissions:
To call IAM GetRole
action or create a service linked role.
To read Amazon S3 bucket.
To create a rule and a stack.
You have reached the limit (20) of the number of conformance packs in an account.
", + "refs": { + } + }, "MaxNumberOfDeliveryChannelsExceededException": { "base": "You have reached the limit of the number of delivery channels you can create.
", "refs": { @@ -1438,6 +1769,11 @@ "refs": { } }, + "MaxNumberOfOrganizationConformancePacksExceededException": { + "base": "You have reached the limit (10) of the number of organization conformance packs in an account.
", + "refs": { + } + }, "MaxNumberOfRetentionConfigurationsExceededException": { "base": "Failed to add the retention configuration because a retention configuration with that name already exists.
", "refs": { @@ -1485,6 +1821,12 @@ "DescribeAggregateComplianceByConfigRulesResponse$NextToken": "The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The string that you use in a subsequent request to get the next page of results in a paginated response.
", + "DescribeConformancePackComplianceRequest$NextToken": "The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The string that you use in a subsequent request to get the next page of results in a paginated response.
", + "GetConformancePackComplianceDetailsRequest$NextToken": "The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The string that you use in a subsequent request to get the next page of results in a paginated response.
", "GetResourceConfigHistoryRequest$nextToken": "The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
AWS Config rule that you passed in the filter does not exist.
", + "refs": { + } + }, "NoSuchConfigurationAggregatorException": { "base": "You have specified a configuration aggregator that does not exist.
", "refs": { @@ -1549,6 +1900,11 @@ "refs": { } }, + "NoSuchConformancePackException": { + "base": "You specified one or more conformance packs that do not exist.
", + "refs": { + } + }, "NoSuchDeliveryChannelException": { "base": "You have specified a delivery channel that does not exist.
", "refs": { @@ -1559,6 +1915,11 @@ "refs": { } }, + "NoSuchOrganizationConformancePackException": { + "base": "AWS Config organization conformance pack that you passed in the filter does not exist.
For DeleteOrganizationConformancePack, you tried to delete an organization conformance pack that does not exist.
", + "refs": { + } + }, "NoSuchRemediationConfigurationException": { "base": "You specified an AWS Config rule without a remediation configuration.
", "refs": { @@ -1581,7 +1942,7 @@ } }, "OrganizationAccessDeniedException": { - "base": "For PutConfigAggregator API, no permission to call EnableAWSServiceAccess API.
For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs are called from member accounts. All APIs must be called from organization master account.
", + "base": "For PutConfigAggregator API, no permission to call EnableAWSServiceAccess API.
For all OrganizationConfigRule and OrganizationConformancePack APIs, AWS Config throws an exception if APIs are called from member accounts. All APIs must be called from organization master account.
", "refs": { } }, @@ -1653,7 +2014,66 @@ "OrganizationConfigRules": { "base": null, "refs": { - "DescribeOrganizationConfigRulesResponse$OrganizationConfigRules": "Retuns a list OrganizationConfigRule
objects.
Returns a list of OrganizationConfigRule
objects.
An organization conformance pack that has information about conformance packs that AWS Config creates in member accounts.
", + "refs": { + "OrganizationConformancePacks$member": null + } + }, + "OrganizationConformancePackDetailedStatus": { + "base": "Organization conformance pack creation or deletion status in each member account. This includes the name of the conformance pack, the status, error code and error message when the conformance pack creation or deletion failed.
", + "refs": { + "OrganizationConformancePackDetailedStatuses$member": null + } + }, + "OrganizationConformancePackDetailedStatuses": { + "base": null, + "refs": { + "GetOrganizationConformancePackDetailedStatusResponse$OrganizationConformancePackDetailedStatuses": "A list of OrganizationConformancePackDetailedStatus
objects.
The name of organization conformance pack that you want to delete.
", + "GetOrganizationConformancePackDetailedStatusRequest$OrganizationConformancePackName": "The name of organization conformance pack for which you want status details for member accounts.
", + "OrganizationConformancePack$OrganizationConformancePackName": "The name you assign to an organization conformance pack.
", + "OrganizationConformancePackNames$member": null, + "OrganizationConformancePackStatus$OrganizationConformancePackName": "The name that you assign to organization conformance pack.
", + "PutOrganizationConformancePackRequest$OrganizationConformancePackName": "Name of the organization conformance pack you want to create.
" + } + }, + "OrganizationConformancePackNames": { + "base": null, + "refs": { + "DescribeOrganizationConformancePackStatusesRequest$OrganizationConformancePackNames": "The names of organization conformance packs for which you want status details. If you do not specify any names, AWS Config returns details for all your organization conformance packs.
", + "DescribeOrganizationConformancePacksRequest$OrganizationConformancePackNames": "The name that you assign to an organization conformance pack.
" + } + }, + "OrganizationConformancePackStatus": { + "base": "Returns the status for an organization conformance pack in an organization.
", + "refs": { + "OrganizationConformancePackStatuses$member": null + } + }, + "OrganizationConformancePackStatuses": { + "base": null, + "refs": { + "DescribeOrganizationConformancePackStatusesResponse$OrganizationConformancePackStatuses": "A list of OrganizationConformancePackStatus
objects.
You have specified a template that is not valid or supported.
", + "refs": { + } + }, + "OrganizationConformancePacks": { + "base": null, + "refs": { + "DescribeOrganizationConformancePacksResponse$OrganizationConformancePacks": "Returns a list of OrganizationConformancePacks objects.
" } }, "OrganizationCustomRuleMetadata": { @@ -1670,6 +2090,25 @@ "PutOrganizationConfigRuleRequest$OrganizationManagedRuleMetadata": "An OrganizationManagedRuleMetadata
object.
Indicates deployment status for conformance pack in a member account. When master account calls PutOrganizationConformancePack
action for the first time, conformance pack status is created in the member account. When master account calls PutOrganizationConformancePack
action for the second time, conformance pack status is updated in the member account. Conformance pack status is deleted when the master account deletes OrganizationConformancePack
and disables service access for config-multiaccountsetup.amazonaws.com
.
AWS Config sets the state of the conformance pack to:
CREATE_SUCCESSFUL
when conformance pack has been created in the member account.
CREATE_IN_PROGRESS
when conformance pack is being created in the member account.
CREATE_FAILED
when conformance pack creation has failed in the member account.
DELETE_FAILED
when conformance pack deletion has failed in the member account.
DELETE_IN_PROGRESS
when conformance pack is being deleted in the member account.
DELETE_SUCCESSFUL
when conformance pack has been deleted in the member account.
UPDATE_SUCCESSFUL
when conformance pack has been updated in the member account.
UPDATE_IN_PROGRESS
when conformance pack is being updated in the member account.
UPDATE_FAILED
when conformance pack deletion has failed in the member account.
Indicates deployment status for conformance pack in a member account. When master account calls PutOrganizationConformancePack
action for the first time, conformance pack status is created in the member account. When master account calls PutOrganizationConformancePack
action for the second time, conformance pack status is updated in the member account. Conformance pack status is deleted when the master account deletes OrganizationConformancePack
and disables service access for config-multiaccountsetup.amazonaws.com
.
AWS Config sets the state of the conformance pack to:
CREATE_SUCCESSFUL
when conformance pack has been created in the member account.
CREATE_IN_PROGRESS
when conformance pack is being created in the member account.
CREATE_FAILED
when conformance pack creation has failed in the member account.
DELETE_FAILED
when conformance pack deletion has failed in the member account.
DELETE_IN_PROGRESS
when conformance pack is being deleted in the member account.
DELETE_SUCCESSFUL
when conformance pack has been deleted in the member account.
UPDATE_SUCCESSFUL
when conformance pack has been updated in the member account.
UPDATE_IN_PROGRESS
when conformance pack is being updated in the member account.
UPDATE_FAILED
when conformance pack deletion has failed in the member account.
Status filter object to filter results based on specific member account ID or status type for an organization conformance pack.
", + "refs": { + "GetOrganizationConformancePackDetailedStatusRequest$Filters": "An OrganizationResourceDetailedStatusFilters
object.
Indicates deployment status of an organization conformance pack. When master account calls PutOrganizationConformancePack for the first time, conformance pack status is created in all the member accounts. When master account calls PutOrganizationConformancePack for the second time, conformance pack status is updated in all the member accounts. Additionally, conformance pack status is updated when one or more member accounts join or leave an organization. Conformance pack status is deleted when the master account deletes OrganizationConformancePack in all the member accounts and disables service access for config-multiaccountsetup.amazonaws.com
.
AWS Config sets the state of the conformance pack to:
CREATE_SUCCESSFUL
when an organization conformance pack has been successfully created in all the member accounts.
CREATE_IN_PROGRESS
when an organization conformance pack creation is in progress.
CREATE_FAILED
when an organization conformance pack creation failed in one or more member accounts within that organization.
DELETE_FAILED
when an organization conformance pack deletion failed in one or more member accounts within that organization.
DELETE_IN_PROGRESS
when an organization conformance pack deletion is in progress.
DELETE_SUCCESSFUL
when an organization conformance pack has been successfully deleted from all the member accounts.
UPDATE_SUCCESSFUL
when an organization conformance pack has been successfully updated in all the member accounts.
UPDATE_IN_PROGRESS
when an organization conformance pack update is in progress.
UPDATE_FAILED
when an organization conformance pack update failed in one or more member accounts within that organization.
Indicates whether AWS or the customer owns and manages the AWS Config rule.
" } }, + "PageSizeLimit": { + "base": null, + "refs": { + "DescribeConformancePackStatusRequest$Limit": "The maximum number of conformance packs returned on each page.
", + "DescribeConformancePacksRequest$Limit": "The maximum number of conformance packs returned on each page.
", + "GetConformancePackComplianceSummaryRequest$Limit": null + } + }, + "ParameterName": { + "base": null, + "refs": { + "ConformancePackInputParameter$ParameterName": "One part of a key-value pair.
" + } + }, + "ParameterValue": { + "base": null, + "refs": { + "ConformancePackInputParameter$ParameterValue": "Another part of the key-value pair.
" + } + }, "PendingAggregationRequest": { "base": "An object that represents the account ID and region of an aggregator account that is requesting authorization but is not yet authorized.
", "refs": { @@ -1736,6 +2195,16 @@ "refs": { } }, + "PutConformancePackRequest": { + "base": null, + "refs": { + } + }, + "PutConformancePackResponse": { + "base": null, + "refs": { + } + }, "PutDeliveryChannelRequest": { "base": "The input for the PutDeliveryChannel action.
", "refs": { @@ -1761,6 +2230,16 @@ "refs": { } }, + "PutOrganizationConformancePackRequest": { + "base": null, + "refs": { + } + }, + "PutOrganizationConformancePackResponse": { + "base": null, + "refs": { + } + }, "PutRemediationConfigurationsRequest": { "base": null, "refs": { @@ -2038,7 +2517,7 @@ } }, "ResourceInUseException": { - "base": "You see this exception in the following cases:
For DeleteConfigRule API, AWS Config is deleting this rule. Try your request again later.
For DeleteConfigRule API, the rule is deleting your evaluation results. Try your request again later.
For DeleteConfigRule API, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.
For PutConfigOrganizationRule, organization config rule deletion is in progress. Try your request again later.
For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.
You see this exception in the following cases:
For DeleteConfigRule, AWS Config is deleting this rule. Try your request again later.
For DeleteConfigRule, the rule is deleting your evaluation results. Try your request again later.
For DeleteConfigRule, a remediation action is associated with the rule and AWS Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.
For PutConfigOrganizationRule, organization config rule deletion is in progress. Try your request again later.
For DeleteOrganizationConfigRule, organization config rule creation is in progress. Try your request again later.
For PutConformancePack and PutOrganizationConformancePack, a conformance pack creation, update, and deletion is in progress. Try your request again later.
For DeleteConformancePack, a conformance pack creation, update, and deletion is in progress. Try your request again later.
A SsmControls object.
" } }, + "StackArn": { + "base": null, + "refs": { + "ConformancePackStatusDetail$StackArn": "Amazon Resource Name (ARN) of AWS CloudFormation stack.
" + } + }, "StartConfigRulesEvaluationRequest": { "base": "", "refs": { @@ -2308,6 +2793,10 @@ "DescribeOrganizationConfigRuleStatusesResponse$NextToken": "The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
", + "DescribeOrganizationConformancePackStatusesResponse$NextToken": "The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
", + "DescribeOrganizationConformancePacksRequest$NextToken": "The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
", + "DescribeOrganizationConformancePacksResponse$NextToken": "The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
", "DescribePendingAggregationRequestsRequest$NextToken": "The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned in a previous request that you use to request the next page of results in a paginated response.
The string that you use in a subsequent request to get the next page of results in a paginated response.
", "GetOrganizationConfigRuleDetailedStatusRequest$NextToken": "The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken
string returned on a previous page that you use to get the next page of results in a paginated response.
The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
", + "GetOrganizationConformancePackDetailedStatusResponse$NextToken": "The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.
", "MemberAccountStatus$ErrorCode": "An error code that is returned when config rule creation or deletion failed in the member account.
", "MemberAccountStatus$ErrorMessage": "An error message indicating that config rule account creation or deletion has failed due to an error in the member account.
", "OrganizationAggregationSource$RoleArn": "ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
", "OrganizationConfigRuleStatus$ErrorCode": "An error code that is returned when organization config rule creation or deletion has failed.
", "OrganizationConfigRuleStatus$ErrorMessage": "An error message indicating that organization config rule creation or deletion failed due to an error.
", + "OrganizationConformancePackDetailedStatus$ErrorCode": "An error code that is returned when conformance pack creation or deletion failed in the member account.
", + "OrganizationConformancePackDetailedStatus$ErrorMessage": "An error message indicating that conformance pack account creation or deletion has failed due to an error in the member account.
", + "OrganizationConformancePackStatus$ErrorCode": "An error code that is returned when organization conformance pack creation or deletion has failed in the member account.
", + "OrganizationConformancePackStatus$ErrorMessage": "An error message indicating that organization conformance pack creation or deletion failed due to an error.
", "PutEvaluationsRequest$ResultToken": "An encrypted token that associates an evaluation with an AWS Config rule. Identifies the rule and the event that triggered the evaluation.
", "RemediationConfiguration$TargetVersion": "Version of the target. For example, version of the SSM document.
", "RemediationConfiguration$ResourceType": "The type of a resource.
", @@ -2372,6 +2867,9 @@ "ComplianceResourceTypes$member": null, "ComplianceSummaryByResourceType$ResourceType": "The type of AWS resource.
", "ConfigRule$CreatedBy": "Service principal name of the service that created the rule.
The field is populated only if the service linked rule is created by a service. The field is empty if you create your own rule.
Filters the results by the resource type (for example, \"AWS::EC2::Instance\"
).
The types of AWS resources for which you want compliance information (for example, AWS::EC2::Instance
). For this action, you can specify that the resource type is an AWS account by specifying AWS::::Account
.
The type of AWS resource that was evaluated.
", "Evaluation$Annotation": "Supplementary information about how the evaluation determined the compliance.
", @@ -2381,12 +2879,15 @@ "GetAggregateDiscoveredResourceCountsResponse$GroupByKey": "The key passed into the request object. If GroupByKey
is not provided, the result will be empty.
The type of the AWS resource for which you want compliance information.
", "GroupedResourceCount$GroupName": "The name of the group that can be region, account ID, or resource type. For example, region1, region2 if the region was chosen as GroupByKey
.
The Amazon Resource Name (ARN) of organization config rule.
", + "OrganizationConfigRule$OrganizationConfigRuleArn": "Amazon Resource Name (ARN) of organization config rule.
", + "OrganizationConformancePack$OrganizationConformancePackArn": "Amazon Resource Name (ARN) of organization conformance pack.
", + "OrganizationConformancePackDetailedStatus$ConformancePackName": "The name of conformance pack deployed in the member account.
", "OrganizationCustomRuleMetadata$LambdaFunctionArn": "The lambda function ARN.
", "OrganizationCustomRuleMetadata$TagValueScope": "The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).
", "OrganizationManagedRuleMetadata$RuleIdentifier": "For organization config managed rules, a predefined identifier from a list. For example, IAM_PASSWORD_POLICY
is a managed rule. To reference a managed rule, see Using AWS Managed Config Rules.
The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).
", "PutOrganizationConfigRuleResponse$OrganizationConfigRuleArn": "The Amazon Resource Name (ARN) of an organization config rule.
", + "PutOrganizationConformancePackResponse$OrganizationConformancePackArn": "ARN of the organization conformance pack.
", "RemediationConfiguration$TargetId": "Target ID is the name of the public document.
", "RemediationException$ResourceType": "The type of a resource.
", "RemediationExceptionResourceKey$ResourceType": "The type of a resource.
", @@ -2411,6 +2912,7 @@ "ComplianceByConfigRule$ConfigRuleName": "The name of the AWS Config rule.
", "ConfigRule$ConfigRuleName": "The name that you assign to the AWS Config rule. The name is required if you are adding a new rule.
", "ConfigRuleEvaluationStatus$ConfigRuleName": "The name of the AWS Config rule.
", + "ConformancePackConfigRuleNames$member": null, "DeleteConfigRuleRequest$ConfigRuleName": "The name of the AWS Config rule that you want to delete.
", "DeleteEvaluationResultsRequest$ConfigRuleName": "The name of the AWS Config rule for which you want to delete the evaluation results.
", "EvaluationResultQualifier$ConfigRuleName": "The name of the AWS Config rule that was used in the evaluation.
", @@ -2498,6 +3000,20 @@ "PutConfigurationAggregatorRequest$Tags": "An array of tag object.
" } }, + "TemplateBody": { + "base": null, + "refs": { + "PutConformancePackRequest$TemplateBody": "A string containing full conformance pack template body. Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.
You can only use a YAML template with one resource type, that is, config rule.
A string containing full conformance pack template body. Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.
" + } + }, + "TemplateS3Uri": { + "base": null, + "refs": { + "PutConformancePackRequest$TemplateS3Uri": "Location of file containing the template body. The uri must point to the conformance pack template (max size: 300,000 bytes) that is located in an Amazon S3 bucket in the same region as the conformance pack.
You must have access to read Amazon S3 bucket.
Location of file containing the template body. The uri must point to the conformance pack template (max size: 300,000 bytes).
You must have access to read Amazon S3 bucket.
You have reached the limit of the number of tags you can use. You have more than 50 tags.
", "refs": { diff --git a/models/apis/connect/2017-08-08/api-2.json b/models/apis/connect/2017-08-08/api-2.json index 2c626e13e07..801c45db8fb 100644 --- a/models/apis/connect/2017-08-08/api-2.json +++ b/models/apis/connect/2017-08-08/api-2.json @@ -253,6 +253,22 @@ {"shape":"InternalServiceException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ] + }, "ListUserHierarchyGroups":{ "name":"ListUserHierarchyGroups", "http":{ @@ -319,6 +335,36 @@ {"shape":"InternalServiceException"} ] }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ] + }, "UpdateContactAttributes":{ "name":"UpdateContactAttributes", "http":{ @@ -537,7 +583,8 @@ "shape":"InstanceId", "location":"uri", "locationName":"InstanceId" - } + }, + "Tags":{"shape":"TagMap"} } }, "CreateUserResponse":{ @@ -1210,6 +1257,23 @@ "NextToken":{"shape":"NextToken"} } }, + "ListTagsForResourceRequest":{ + "type":"structure", + 
"required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ARN", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{"shape":"TagMap"} + } + }, "ListUserHierarchyGroupsRequest":{ "type":"structure", "required":["InstanceId"], @@ -1718,6 +1782,44 @@ "members":{ } }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ARN", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{"shape":"TagMap"} + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "Threshold":{ "type":"structure", "members":{ @@ -1745,6 +1847,25 @@ "PERCENT" ] }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ARN", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, "UpdateContactAttributesRequest":{ "type":"structure", "required":[ @@ -1878,7 +1999,8 @@ "DirectoryUserId":{"shape":"DirectoryUserId"}, "SecurityProfileIds":{"shape":"SecurityProfileIds"}, "RoutingProfileId":{"shape":"RoutingProfileId"}, - "HierarchyGroupId":{"shape":"HierarchyGroupId"} + "HierarchyGroupId":{"shape":"HierarchyGroupId"}, + "Tags":{"shape":"TagMap"} } }, "UserId":{"type":"string"}, diff --git a/models/apis/connect/2017-08-08/docs-2.json b/models/apis/connect/2017-08-08/docs-2.json index e321b6592e7..c6ace89b68f 100644 --- a/models/apis/connect/2017-08-08/docs-2.json +++ b/models/apis/connect/2017-08-08/docs-2.json @@ 
-4,7 +4,7 @@ "operations": { "CreateUser": "Creates a user account for the specified Amazon Connect instance.
", "DeleteUser": "Deletes a user account from the specified Amazon Connect instance.
", - "DescribeUser": "Describes the specified user account.
", + "DescribeUser": "Describes the specified user account. You can find the instance ID in the console (it’s the final part of the ARN). The console does not display the user IDs. Instead, list the users and note the IDs provided in the output.
", "DescribeUserHierarchyGroup": "Describes the specified hierarchy group.
", "DescribeUserHierarchyStructure": "Describes the hierarchy structure of the specified Amazon Connect instance.
", "GetContactAttributes": "Retrieves the contact attributes for the specified contact.
", @@ -17,10 +17,13 @@ "ListQueues": "Provides information about the queues for the specified Amazon Connect instance.
", "ListRoutingProfiles": "Provides summary information about the routing profiles for the specified Amazon Connect instance.
", "ListSecurityProfiles": "Provides summary information about the security profiles for the specified Amazon Connect instance.
", + "ListTagsForResource": "Lists the tags for the specified resource.
", "ListUserHierarchyGroups": "Provides summary information about the hierarchy groups for the specified Amazon Connect instance.
", "ListUsers": "Provides summary information about the users for the specified Amazon Connect instance.
", "StartOutboundVoiceContact": "Initiates a contact flow to place an outbound call to a customer.
There is a 60 second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.
", "StopContact": "Ends the specified contact.
", + "TagResource": "Adds the specified tags to the specified resource.
The supported resource type is users.
", + "UntagResource": "Removes the specified tags from the specified resource.
", "UpdateContactAttributes": "Creates or updates the contact attributes associated with the specified contact.
You can add or update attributes for both ongoing and completed contacts. For example, you can update the customer's name or the reason the customer called while the call is active, or add notes about steps that the agent took during the call that are displayed to the next agent that takes the call. You can also update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or identifying abusive callers.
Contact attributes are available in Amazon Connect for 24 months, and are then deleted.
Important: You cannot use the operation to update attributes for contacts that occurred prior to the release of the API, September 12, 2018. You can update attributes only for contacts that started after the release of the API. If you attempt to update attributes for a contact that occurred prior to the release of the API, a 400 error is returned. This applies also to queued callbacks that were initiated prior to the release of the API but are still active in your instance.
", "UpdateUserHierarchy": "Assigns the specified hierarchy group to the specified user.
", "UpdateUserIdentityInfo": "Updates the identity information for the specified user.
", @@ -38,11 +41,14 @@ "HierarchyGroupSummary$Arn": "The Amazon Resource Name (ARN) of the hierarchy group.
", "HierarchyLevel$Arn": "The Amazon Resource Name (ARN) of the hierarchy level.
", "HoursOfOperationSummary$Arn": "The Amazon Resource Name (ARN) of the hours of operation.
", + "ListTagsForResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource.
", "PhoneNumberSummary$Arn": "The Amazon Resource Name (ARN) of the phone number.
", "QueueReference$Arn": "The Amazon Resource Name (ARN) of the queue.
", "QueueSummary$Arn": "The Amazon Resource Name (ARN) of the queue.
", "RoutingProfileSummary$Arn": "The Amazon Resource Name (ARN) of the routing profile.
", "SecurityProfileSummary$Arn": "The Amazon Resource Name (ARN) of the security profile.
", + "TagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource.
", + "UntagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource.
", "User$Arn": "The Amazon Resource Name (ARN) of the user account.
", "UserSummary$Arn": "The Amazon Resource Name (ARN) of the user account.
" } @@ -613,6 +619,16 @@ "refs": { } }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "ListUserHierarchyGroupsRequest": { "base": null, "refs": { @@ -914,6 +930,39 @@ "refs": { } }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$tagKeys": "The tag keys.
" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateUserRequest$Tags": "One or more tags.
", + "ListTagsForResourceResponse$tags": "Information about the tags.
", + "TagResourceRequest$tags": "One or more tags. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.
", + "User$Tags": "The tags.
" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, "Threshold": { "base": "Contains information about the threshold for service level metrics.
", "refs": { @@ -938,6 +987,11 @@ "HistoricalMetric$Unit": "The unit for the metric.
" } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, "UpdateContactAttributesRequest": { "base": null, "refs": { diff --git a/models/apis/dataexchange/2017-07-25/api-2.json b/models/apis/dataexchange/2017-07-25/api-2.json new file mode 100644 index 00000000000..9c4e91b4c58 --- /dev/null +++ b/models/apis/dataexchange/2017-07-25/api-2.json @@ -0,0 +1,2263 @@ +{ + "metadata": { + "apiVersion": "2017-07-25", + "endpointPrefix": "dataexchange", + "signingName": "dataexchange", + "serviceFullName": "AWS Data Exchange", + "serviceId": "DataExchange", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "dataexchange-2017-07-25", + "signatureVersion": "v4" + }, + "operations": { + "CancelJob": { + "name": "CancelJob", + "http": { + "method": "DELETE", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 204 + }, + "input": { + "shape": "CancelJobRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + } + ] + }, + "CreateDataSet": { + "name": "CreateDataSet", + "http": { + "method": "POST", + "requestUri": "/v1/data-sets", + "responseCode": 201 + }, + "input": { + "shape": "CreateDataSetRequest" + }, + "output": { + "shape": "CreateDataSetResponse" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ServiceLimitExceededException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "CreateJob": { + "name": "CreateJob", + "http": { + "method": "POST", + "requestUri": "/v1/jobs", + "responseCode": 201 + }, + "input": { + "shape": "CreateJobRequest" + }, + "output": { + "shape": "CreateJobResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + 
{ + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "CreateRevision": { + "name": "CreateRevision", + "http": { + "method": "POST", + "requestUri": "/v1/data-sets/{DataSetId}/revisions", + "responseCode": 201 + }, + "input": { + "shape": "CreateRevisionRequest" + }, + "output": { + "shape": "CreateRevisionResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "DeleteAsset": { + "name": "DeleteAsset", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteAssetRequest" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "DeleteDataSet": { + "name": "DeleteDataSet", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteDataSetRequest" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "DeleteRevision": { + "name": "DeleteRevision", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRevisionRequest" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" 
+ }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "GetAsset": { + "name": "GetAsset", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 200 + }, + "input": { + "shape": "GetAssetRequest" + }, + "output": { + "shape": "GetAssetResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "GetDataSet": { + "name": "GetDataSet", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 200 + }, + "input": { + "shape": "GetDataSetRequest" + }, + "output": { + "shape": "GetDataSetResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "GetJob": { + "name": "GetJob", + "http": { + "method": "GET", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 200 + }, + "input": { + "shape": "GetJobRequest" + }, + "output": { + "shape": "GetJobResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "GetRevision": { + "name": "GetRevision", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 200 + }, + "input": { + "shape": "GetRevisionRequest" + }, + "output": { + "shape": "GetRevisionResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListDataSetRevisions": { + "name": 
"ListDataSetRevisions", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions", + "responseCode": 200 + }, + "input": { + "shape": "ListDataSetRevisionsRequest" + }, + "output": { + "shape": "ListDataSetRevisionsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListDataSets": { + "name": "ListDataSets", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets", + "responseCode": 200 + }, + "input": { + "shape": "ListDataSetsRequest" + }, + "output": { + "shape": "ListDataSetsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListJobs": { + "name": "ListJobs", + "http": { + "method": "GET", + "requestUri": "/v1/jobs", + "responseCode": 200 + }, + "input": { + "shape": "ListJobsRequest" + }, + "output": { + "shape": "ListJobsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListRevisionAssets": { + "name": "ListRevisionAssets", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets", + "responseCode": 200 + }, + "input": { + "shape": "ListRevisionAssetsRequest" + }, + "output": { + "shape": "ListRevisionAssetsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + 
"shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse" + }, + "errors": [] + }, + "StartJob": { + "name": "StartJob", + "http": { + "method": "PATCH", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 202 + }, + "input": { + "shape": "StartJobRequest" + }, + "output": { + "shape": "StartJobResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "errors": [] + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "errors": [] + }, + "UpdateAsset": { + "name": "UpdateAsset", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateAssetRequest" + }, + "output": { + "shape": "UpdateAssetResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "UpdateDataSet": { + "name": "UpdateDataSet", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateDataSetRequest" + }, + "output": { + "shape": "UpdateDataSetResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": 
"ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "UpdateRevision": { + "name": "UpdateRevision", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateRevisionRequest" + }, + "output": { + "shape": "UpdateRevisionResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + } + }, + "shapes": { + "AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "Arn": { + "type": "string" + }, + "AssetDestinationEntry": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id" + }, + "Bucket": { + "shape": "__string" + }, + "Key": { + "shape": "__string" + } + }, + "required": [ + "Bucket", + "AssetId" + ] + }, + "AssetDetails": { + "type": "structure", + "members": { + "S3SnapshotAsset": { + "shape": "S3SnapshotAsset" + } + } + }, + "AssetEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetDetails": { + "shape": "AssetDetails" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "AssetType", + "CreatedAt", + "DataSetId", + "Id", + "Arn", + "AssetDetails", + "UpdatedAt", + "RevisionId", + "Name" + ] + }, + "AssetName": { + "type": 
"string" + }, + "AssetSourceEntry": { + "type": "structure", + "members": { + "Bucket": { + "shape": "__string" + }, + "Key": { + "shape": "__string" + } + }, + "required": [ + "Bucket", + "Key" + ] + }, + "AssetType": { + "type": "string", + "enum": [ + "S3_SNAPSHOT" + ] + }, + "CancelJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId" + } + }, + "required": [ + "JobId" + ] + }, + "Code": { + "type": "string", + "enum": [ + "ACCESS_DENIED_EXCEPTION", + "INTERNAL_SERVER_EXCEPTION", + "MALWARE_DETECTED", + "RESOURCE_NOT_FOUND_EXCEPTION", + "SERVICE_QUOTA_EXCEEDED_EXCEPTION", + "VALIDATION_EXCEPTION", + "MALWARE_SCAN_ENCRYPTED_FILE" + ] + }, + "ConflictException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + }, + "ResourceId": { + "shape": "__string" + }, + "ResourceType": { + "shape": "ResourceType" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "CreateDataSetRequest": { + "type": "structure", + "members": { + "AssetType": { + "shape": "AssetType" + }, + "Description": { + "shape": "Description" + }, + "Name": { + "shape": "Name" + }, + "Tags": { + "shape": "MapOf__string" + } + }, + "required": [ + "AssetType", + "Description", + "Name" + ] + }, + "CreateDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "CreateJobRequest": { + "type": "structure", + "members": { + "Details": { + "shape": "RequestDetails" + }, + "Type": { 
+ "shape": "Type" + } + }, + "required": [ + "Type", + "Details" + ] + }, + "CreateJobResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Details": { + "shape": "ResponseDetails" + }, + "Errors": { + "shape": "ListOfJobError" + }, + "Id": { + "shape": "Id" + }, + "State": { + "shape": "State" + }, + "Type": { + "shape": "Type" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "CreateRevisionRequest": { + "type": "structure", + "members": { + "Comment": { + "shape": "__stringMin0Max16384" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Tags": { + "shape": "MapOf__string" + } + }, + "required": [ + "DataSetId" + ] + }, + "CreateRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "DataSetEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "Origin", + "AssetType", + "Description", + "CreatedAt", + "Id", + "Arn", + "UpdatedAt", + "Name" + ] + }, + "DeleteAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId" + }, + "DataSetId": { + 
"shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId" + ] + }, + "DeleteDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + } + }, + "required": [ + "DataSetId" + ] + }, + "DeleteRevisionRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "Description": { + "type": "string" + }, + "Details": { + "type": "structure", + "members": { + "ImportAssetFromSignedUrlJobErrorDetails": { + "shape": "ImportAssetFromSignedUrlJobErrorDetails" + }, + "ImportAssetsFromS3JobErrorDetails": { + "shape": "ListOfAssetSourceEntry" + } + } + }, + "ExportAssetToSignedUrlRequestDetails": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "AssetId", + "RevisionId" + ] + }, + "ExportAssetToSignedUrlResponseDetails": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + }, + "SignedUrl": { + "shape": "__string" + }, + "SignedUrlExpiresAt": { + "shape": "Timestamp" + } + }, + "required": [ + "DataSetId", + "AssetId", + "RevisionId" + ] + }, + "ExportAssetsToS3RequestDetails": { + "type": "structure", + "members": { + "AssetDestinations": { + "shape": "ListOfAssetDestinationEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "AssetDestinations", + "DataSetId", + "RevisionId" + ] + 
}, + "ExportAssetsToS3ResponseDetails": { + "type": "structure", + "members": { + "AssetDestinations": { + "shape": "ListOfAssetDestinationEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "AssetDestinations", + "DataSetId", + "RevisionId" + ] + }, + "GetAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId" + ] + }, + "GetAssetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetDetails": { + "shape": "AssetDetails" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "GetDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + } + }, + "required": [ + "DataSetId" + ] + }, + "GetDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "GetJobRequest": { + "type": "structure", + "members": { + 
"JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId" + } + }, + "required": [ + "JobId" + ] + }, + "GetJobResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Details": { + "shape": "ResponseDetails" + }, + "Errors": { + "shape": "ListOfJobError" + }, + "Id": { + "shape": "Id" + }, + "State": { + "shape": "State" + }, + "Type": { + "shape": "Type" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "GetRevisionRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "GetRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "Id": { + "type": "string" + }, + "ImportAssetFromSignedUrlJobErrorDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + } + }, + "required": [ + "AssetName" + ] + }, + "ImportAssetFromSignedUrlRequestDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + }, + "DataSetId": { + "shape": "Id" + }, + "Md5Hash": { + "shape": "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "Md5Hash", + "RevisionId", + "AssetName" + ] + }, + "ImportAssetFromSignedUrlResponseDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + }, + 
"DataSetId": { + "shape": "Id" + }, + "Md5Hash": { + "shape": "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093" + }, + "RevisionId": { + "shape": "Id" + }, + "SignedUrl": { + "shape": "__string" + }, + "SignedUrlExpiresAt": { + "shape": "Timestamp" + } + }, + "required": [ + "DataSetId", + "AssetName", + "RevisionId" + ] + }, + "ImportAssetsFromS3RequestDetails": { + "type": "structure", + "members": { + "AssetSources": { + "shape": "ListOfAssetSourceEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "AssetSources", + "RevisionId" + ] + }, + "ImportAssetsFromS3ResponseDetails": { + "type": "structure", + "members": { + "AssetSources": { + "shape": "ListOfAssetSourceEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "AssetSources", + "RevisionId" + ] + }, + "InternalServerException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "JobEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Details": { + "shape": "ResponseDetails" + }, + "Errors": { + "shape": "ListOfJobError" + }, + "Id": { + "shape": "Id" + }, + "State": { + "shape": "State" + }, + "Type": { + "shape": "Type" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "Type", + "Details", + "State", + "CreatedAt", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "JobError": { + "type": "structure", + "members": { + "Code": { + "shape": "Code" + }, + "Details": { + "shape": "Details" + }, + "LimitName": { + "shape": "JobErrorLimitName" + }, + "LimitValue": { + "shape": "__double" + }, + "Message": { + "shape": "__string" + }, + "ResourceId": { + "shape": "__string" + }, + "ResourceType": { + "shape": "JobErrorResourceTypes" + } + }, + 
"required": [ + "Message", + "Code" + ] + }, + "JobErrorLimitName": { + "type": "string", + "enum": [ + "Assets per revision", + "Asset size in GB" + ] + }, + "JobErrorResourceTypes": { + "type": "string", + "enum": [ + "REVISION", + "ASSET" + ] + }, + "LimitName": { + "type": "string", + "enum": [ + "Products per account", + "Data sets per account", + "Data sets per product", + "Revisions per data set", + "Assets per revision", + "Assets per import job from Amazon S3", + "Asset per export job from Amazon S3", + "Asset size in GB", + "Concurrent in progress jobs to import assets from Amazon S3", + "Concurrent in progress jobs to import assets from a signed URL", + "Concurrent in progress jobs to export assets to Amazon S3", + "Concurrent in progress jobs to export assets to a signed URL" + ] + }, + "ListDataSetRevisionsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + } + }, + "required": [ + "DataSetId" + ] + }, + "ListDataSetRevisionsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "NextToken" + }, + "Revisions": { + "shape": "ListOfRevisionEntry" + } + } + }, + "ListDataSetsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "Origin": { + "shape": "__string", + "location": "querystring", + "locationName": "origin" + } + } + }, + "ListDataSetsResponse": { + "type": "structure", + "members": { + "DataSets": { + "shape": "ListOfDataSetEntry" + }, + "NextToken": { + "shape": "NextToken" + } + } + }, + "ListJobsRequest": { + 
"type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "querystring", + "locationName": "dataSetId" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "RevisionId": { + "shape": "__string", + "location": "querystring", + "locationName": "revisionId" + } + } + }, + "ListJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfJobEntry" + }, + "NextToken": { + "shape": "NextToken" + } + } + }, + "ListOfAssetDestinationEntry": { + "type": "list", + "member": { + "shape": "AssetDestinationEntry" + } + }, + "ListOfAssetSourceEntry": { + "type": "list", + "member": { + "shape": "AssetSourceEntry" + } + }, + "ListRevisionAssetsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "ListRevisionAssetsResponse": { + "type": "structure", + "members": { + "Assets": { + "shape": "ListOfAssetEntry" + }, + "NextToken": { + "shape": "NextToken" + } + } + }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + } + }, + "required": [ + "ResourceArn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "MapOf__string", + "locationName": "tags" + } + } + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 25 + }, + "Name": { + 
"type": "string" + }, + "NextToken": { + "type": "string" + }, + "Origin": { + "type": "string", + "enum": [ + "OWNED", + "ENTITLED" + ] + }, + "OriginDetails": { + "type": "structure", + "members": { + "ProductId": { + "shape": "__string" + } + }, + "required": [ + "ProductId" + ] + }, + "RequestDetails": { + "type": "structure", + "members": { + "ExportAssetToSignedUrl": { + "shape": "ExportAssetToSignedUrlRequestDetails" + }, + "ExportAssetsToS3": { + "shape": "ExportAssetsToS3RequestDetails" + }, + "ImportAssetFromSignedUrl": { + "shape": "ImportAssetFromSignedUrlRequestDetails" + }, + "ImportAssetsFromS3": { + "shape": "ImportAssetsFromS3RequestDetails" + } + } + }, + "ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + }, + "ResourceId": { + "shape": "__string" + }, + "ResourceType": { + "shape": "ResourceType" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "ResourceType": { + "type": "string", + "enum": [ + "DATA_SET", + "REVISION", + "ASSET", + "JOB" + ] + }, + "ResponseDetails": { + "type": "structure", + "members": { + "ExportAssetToSignedUrl": { + "shape": "ExportAssetToSignedUrlResponseDetails" + }, + "ExportAssetsToS3": { + "shape": "ExportAssetsToS3ResponseDetails" + }, + "ImportAssetFromSignedUrl": { + "shape": "ImportAssetFromSignedUrlResponseDetails" + }, + "ImportAssetsFromS3": { + "shape": "ImportAssetsFromS3ResponseDetails" + } + } + }, + "RevisionEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "CreatedAt", + "DataSetId", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "S3SnapshotAsset": { + 
"type": "structure", + "members": { + "Size": { + "shape": "__doubleMin0" + } + }, + "required": [ + "Size" + ] + }, + "ServiceLimitExceededException": { + "type": "structure", + "members": { + "LimitName": { + "shape": "LimitName" + }, + "LimitValue": { + "shape": "__double" + }, + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 402 + } + }, + "StartJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId" + } + }, + "required": [ + "JobId" + ] + }, + "StartJobResponse": { + "type": "structure", + "members": {} + }, + "State": { + "type": "string", + "enum": [ + "WAITING", + "IN_PROGRESS", + "ERROR", + "COMPLETED", + "CANCELLED", + "TIMED_OUT" + ] + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + }, + "Tags": { + "shape": "MapOf__string", + "locationName": "tags" + } + }, + "required": [ + "ResourceArn", + "Tags" + ] + }, + "ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "Timestamp": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "Type": { + "type": "string", + "enum": [ + "IMPORT_ASSETS_FROM_S3", + "IMPORT_ASSET_FROM_SIGNED_URL", + "EXPORT_ASSETS_TO_S3", + "EXPORT_ASSET_TO_SIGNED_URL" + ] + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + }, + "TagKeys": { + "shape": "ListOf__string", + "location": "querystring", + "locationName": "tagKeys" + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ] + }, + "UpdateAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": 
"__string", + "location": "uri", + "locationName": "AssetId" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId", + "Name" + ] + }, + "UpdateAssetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetDetails": { + "shape": "AssetDetails" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "UpdateDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Description": { + "shape": "Description" + }, + "Name": { + "shape": "Name" + } + }, + "required": [ + "DataSetId" + ] + }, + "UpdateDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "UpdateRevisionRequest": { + "type": "structure", + "members": { + "Comment": { + "shape": "__stringMin0Max16384" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Finalized": { + "shape": "__boolean" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, 
+ "required": [ + "RevisionId", + "DataSetId" + ] + }, + "UpdateRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "ValidationException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__doubleMin0": { + "type": "double" + }, + "ListOfAssetEntry": { + "type": "list", + "member": { + "shape": "AssetEntry" + } + }, + "ListOfDataSetEntry": { + "type": "list", + "member": { + "shape": "DataSetEntry" + } + }, + "ListOfJobEntry": { + "type": "list", + "member": { + "shape": "JobEntry" + } + }, + "ListOfJobError": { + "type": "list", + "member": { + "shape": "JobError" + } + }, + "ListOfRevisionEntry": { + "type": "list", + "member": { + "shape": "RevisionEntry" + } + }, + "ListOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "MapOf__string": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "__string": { + "type": "string" + }, + "__stringMin0Max16384": { + "type": "string", + "min": 0, + "max": 16384 + }, + "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093": { + "type": "string", + "min": 24, + "max": 24, + "pattern": "/^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$/" + } + }, + "authorizers": { + "create_job_authorizer": { + "name": "create_job_authorizer", + "type": "provided", + "placement": { + "location": "header", + "name": "Authorization" + } + }, + "start_cancel_get_job_authorizer": { + "name": 
"start_cancel_get_job_authorizer", + "type": "provided", + "placement": { + "location": "header", + "name": "Authorization" + } + } + } +} \ No newline at end of file diff --git a/models/apis/dataexchange/2017-07-25/docs-2.json b/models/apis/dataexchange/2017-07-25/docs-2.json new file mode 100644 index 00000000000..2b6b9fc23f6 --- /dev/null +++ b/models/apis/dataexchange/2017-07-25/docs-2.json @@ -0,0 +1,581 @@ +{ + "version" : "2.0", + "service" : "This is the API reference for AWS Data Exchange.
", + "operations" : { + "CancelJob" : "This operation cancels a job. Jobs can be cancelled only when they are in the WAITING state.
", + "CreateDataSet" : "This operation creates a data set.
", + "CreateJob" : "This operation creates a job.
", + "CreateRevision" : "This operation creates a revision for a data set.
", + "DeleteAsset" : "This operation deletes an asset.
", + "DeleteDataSet" : "This operation deletes a data set.
", + "DeleteRevision" : "This operation deletes a revision.
", + "GetAsset" : "This operation returns information about an asset.
", + "GetDataSet" : "This operation returns information about a data set.
", + "GetJob" : "This operation returns information about a job.
", + "GetRevision" : "This operation returns information about a revision.
", + "ListDataSetRevisions" : "This operation lists a data set's revisions sorted by CreatedAt in descending order.
", + "ListDataSets" : "This operation lists your data sets. When listing by origin OWNED, results are sorted by CreatedAt in descending order. When listing by origin ENTITLED, there is no order and the maxResults parameter is ignored.
", + "ListJobs" : "This operation lists your jobs sorted by CreatedAt in descending order.
", + "ListRevisionAssets" : "This operation lists a revision's assets sorted alphabetically in descending order.
", + "ListTagsForResource" : "This operation lists the tags on the resource.
", + "StartJob" : "This operation starts a job.
", + "TagResource" : "This operation tags a resource.
", + "UntagResource" : "This operation removes one or more tags from a resource.
", + "UpdateAsset" : "This operation updates an asset.
", + "UpdateDataSet" : "This operation updates a data set.
", + "UpdateRevision" : "This operation updates a revision.
" + }, + "shapes" : { + "AccessDeniedException" : { + "base" : "Access to the resource is denied.
", + "refs" : { } + }, + "Arn" : { + "base" : "An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.
", + "refs" : { + "Asset$Arn" : "The ARN for the asset.
", + "AssetEntry$Arn" : "The ARN for the asset.
", + "DataSet$Arn" : "The ARN for the data set.
", + "DataSetEntry$Arn" : "The ARN for the data set.
", + "Job$Arn" : "The ARN for the job.
", + "JobEntry$Arn" : "The ARN for the job.
", + "Revision$Arn" : "The ARN for the revision.
", + "RevisionEntry$Arn" : "The ARN for the revision.
", + "TaggedDataSet$Arn" : "The ARN for the data set.
", + "TaggedRevision$Arn" : "The ARN for the revision
" + } + }, + "Asset" : { + "base" : "An asset in AWS Data Exchange is a piece of data that can be stored as an S3 object. The asset can be a structured data file, an image file, or some other data file. When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.
", + "refs" : { } + }, + "AssetDestinationEntry" : { + "base" : "The destination for the asset.
", + "refs" : { + "ListOfAssetDestinationEntry$member" : null + } + }, + "AssetDetails" : { + "base" : null, + "refs" : { + "Asset$AssetDetails" : "Information about the asset, including its size.
", + "AssetEntry$AssetDetails" : "Information about the asset, including its size.
" + } + }, + "AssetEntry" : { + "base" : "An asset in AWS Data Exchange is a piece of data that can be stored as an S3 object. The asset can be a structured data file, an image file, or some other data file. When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.
", + "refs" : { + "ListOfAssetEntry$member" : null + } + }, + "AssetName" : { + "base" : "The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.
", + "refs" : { + "Asset$Name" : "The name of the asset When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.
", + "AssetEntry$Name" : "The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.
", + "ImportAssetFromSignedUrlJobErrorDetails$AssetName" : null, + "ImportAssetFromSignedUrlRequestDetails$AssetName" : "The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.
", + "ImportAssetFromSignedUrlResponseDetails$AssetName" : "The name for the asset associated with this import response.
", + "UpdateAssetRequest$Name" : "The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.
" + } + }, + "AssetSourceEntry" : { + "base" : "The source of the assets.
", + "refs" : { + "ListOfAssetSourceEntry$member" : null + } + }, + "AssetType" : { + "base" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
", + "refs" : { + "Asset$AssetType" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
", + "AssetEntry$AssetType" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
", + "CreateDataSetRequest$AssetType" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
", + "DataSet$AssetType" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
", + "DataSetEntry$AssetType" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
", + "TaggedDataSet$AssetType" : "The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
" + } + }, + "Code" : { + "base" : null, + "refs" : { + "JobError$Code" : "The code for the job error." + } + }, + "ConflictException" : { + "base" : "The request couldn't be completed because it conflicted with the current state of the resource.
", + "refs" : { } + }, + "CreateDataSetRequest" : { + "base" : "A request to create a data set that contains one or more revisions.
", + "refs" : { } + }, + "CreateJobRequest" : { + "base" : "The CreateJob request. AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created. Created jobs must be started with the StartJob operation.
", + "refs" : { } + }, + "CreateRevisionRequest" : { + "base" : "Creates a revision for a data set. When they're created, revisions are not published to products, and therefore are not available to subscribers. To publish a revision to a data set in a product, the revision must first be finalized.
", + "refs" : { } + }, + "DataSet" : { + "base" : "A data set is an AWS resource with one or more revisions.
", + "refs" : { } + }, + "DataSetEntry" : { + "base" : "A data set is an AWS resource with one or more revisions.
", + "refs" : { + "ListOfDataSetEntry$member" : null + } + }, + "Description" : { + "base" : "A description of a resource.
", + "refs" : { + "CreateDataSetRequest$Description" : "A description for the data set. This value can be up to 16,348 characters long.
", + "DataSet$Description" : "The description for the data set.
", + "DataSetEntry$Description" : "The description for the data set.
", + "TaggedDataSet$Description" : "The description for the data set.
", + "UpdateDataSetRequest$Description" : "The description for the data set.
" + } + }, + "Details" : { + "base" : null, + "refs" : { + "JobError$Details" : null + } + }, + "ExportAssetToSignedUrlRequestDetails" : { + "base" : "Details of the operation to be performed by the job.
", + "refs" : { + "RequestDetails$ExportAssetToSignedUrl" : "Details about the export to signed URL request.
" + } + }, + "ExportAssetToSignedUrlResponseDetails" : { + "base" : "The details of the export to signed URL response.
", + "refs" : { + "ResponseDetails$ExportAssetToSignedUrl" : "Details for the export to signed URL response.
" + } + }, + "ExportAssetsToS3RequestDetails" : { + "base" : "Details of the operation to be performed by the job.
", + "refs" : { + "RequestDetails$ExportAssetsToS3" : "Details about the export to Amazon S3 request.
" + } + }, + "ExportAssetsToS3ResponseDetails" : { + "base" : "Details about the export to Amazon S3 response.
", + "refs" : { + "ResponseDetails$ExportAssetsToS3" : "Details for the export to Amazon S3 response.
" + } + }, + "Id" : { + "base" : "A unique identifier.
", + "refs" : { + "Asset$DataSetId" : "The unique identifier for the data set associated with this asset.
", + "Asset$Id" : "The unique identifier for the asset.
", + "Asset$RevisionId" : "The unique identifier for the revision associated with this asset.
", + "Asset$SourceId" : "The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.
", + "AssetDestinationEntry$AssetId" : "The unique identifier for the asset.
", + "AssetEntry$DataSetId" : "The unique identifier for the data set associated with this asset.
", + "AssetEntry$Id" : "The unique identifier for the asset.
", + "AssetEntry$RevisionId" : "The unique identifier for the revision associated with this asset.
", + "AssetEntry$SourceId" : "The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.
", + "DataSet$Id" : "The unique identifier for the data set.
", + "DataSet$SourceId" : "The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.
", + "DataSetEntry$Id" : "The unique identifier for the data set.
", + "DataSetEntry$SourceId" : "The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.
", + "ExportAssetToSignedUrlRequestDetails$AssetId" : "The unique identifier for the asset that is exported to a signed URL.
", + "ExportAssetToSignedUrlRequestDetails$DataSetId" : "The unique identifier for the data set associated with this export job.
", + "ExportAssetToSignedUrlRequestDetails$RevisionId" : "The unique identifier for the revision associated with this export request.
", + "ExportAssetToSignedUrlResponseDetails$AssetId" : "The unique identifier for the asset associated with this export job.
", + "ExportAssetToSignedUrlResponseDetails$DataSetId" : "The unique identifier for the data set associated with this export job.
", + "ExportAssetToSignedUrlResponseDetails$RevisionId" : "The unique identifier for the revision associated with this export response.
", + "ExportAssetsToS3RequestDetails$DataSetId" : "The unique identifier for the data set associated with this export job.
", + "ExportAssetsToS3RequestDetails$RevisionId" : "The unique identifier for the revision associated with this export request.
", + "ExportAssetsToS3ResponseDetails$DataSetId" : "The unique identifier for the data set associated with this export job.
", + "ExportAssetsToS3ResponseDetails$RevisionId" : "The unique identifier for the revision associated with this export response.
", + "ImportAssetFromSignedUrlRequestDetails$DataSetId" : "The unique identifier for the data set associated with this import job.
", + "ImportAssetFromSignedUrlRequestDetails$RevisionId" : "The unique identifier for the revision associated with this import request.
", + "ImportAssetFromSignedUrlResponseDetails$DataSetId" : "The unique identifier for the data set associated with this import job.
", + "ImportAssetFromSignedUrlResponseDetails$RevisionId" : "The unique identifier for the revision associated with this import response.
", + "ImportAssetsFromS3RequestDetails$DataSetId" : "The unique identifier for the data set associated with this import job.
", + "ImportAssetsFromS3RequestDetails$RevisionId" : "The unique identifier for the revision associated with this import request.
", + "ImportAssetsFromS3ResponseDetails$DataSetId" : "The unique identifier for the data set associated with this import job.
", + "ImportAssetsFromS3ResponseDetails$RevisionId" : "The unique identifier for the revision associated with this import response.
", + "Job$Id" : "The unique identifier for the job.
", + "JobEntry$Id" : "The unique identifier for the job.
", + "Revision$DataSetId" : "The unique identifier for the data set associated with this revision.
", + "Revision$Id" : "The unique identifier for the revision.
", + "Revision$SourceId" : "The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.
", + "RevisionEntry$DataSetId" : "The unique identifier for the data set associated with this revision.
", + "RevisionEntry$Id" : "The unique identifier for the revision.
", + "RevisionEntry$SourceId" : "The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.
", + "TaggedDataSet$Id" : "The unique identifier for the data set.
", + "TaggedDataSet$SourceId" : "The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.
", + "TaggedRevision$DataSetId" : "The unique identifier for the data set associated with this revision.
", + "TaggedRevision$Id" : "The unique identifier for the revision.
", + "TaggedRevision$SourceId" : "The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.
" + } + }, + "ImportAssetFromSignedUrlJobErrorDetails" : { + "base" : null, + "refs" : { + "Details$ImportAssetFromSignedUrlJobErrorDetails" : null + } + }, + "ImportAssetFromSignedUrlRequestDetails" : { + "base" : "Details of the operation to be performed by the job.
", + "refs" : { + "RequestDetails$ImportAssetFromSignedUrl" : "Details about the import from signed URL request.
" + } + }, + "ImportAssetFromSignedUrlResponseDetails" : { + "base" : "The details in the response for an import request, including the signed URL and other information.
", + "refs" : { + "ResponseDetails$ImportAssetFromSignedUrl" : "Details for the import from signed URL response.
" + } + }, + "ImportAssetsFromS3RequestDetails" : { + "base" : "Details of the operation to be performed by the job.
", + "refs" : { + "RequestDetails$ImportAssetsFromS3" : "Details about the import from Amazon S3 request.
" + } + }, + "ImportAssetsFromS3ResponseDetails" : { + "base" : "Details from an import from Amazon S3 response.
", + "refs" : { + "ResponseDetails$ImportAssetsFromS3" : "Details for the import from Amazon S3 response.
" + } + }, + "InternalServerException" : { + "base" : "An exception occurred with the service.", + "refs" : { } + }, + "Job" : { + "base" : "AWS Data Exchange jobs are asynchronous import or export operations used to create or copy assets. Jobs are deleted 90 days after they are created.
", + "refs" : { } + }, + "JobEntry" : { + "base" : "AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created.", + "refs" : { + "ListOfJobEntry$member" : null + } + }, + "JobError" : { + "base" : "An error that occurred with the job request.", + "refs" : { + "ListOfJobError$member" : null + } + }, + "JobErrorLimitName" : { + "base" : "The name of the limit that was reached.", + "refs" : { + "JobError$LimitName" : "The name of the limit that was reached.
" + } + }, + "JobErrorResourceTypes" : { + "base" : "The types of resource which the job error can apply to.", + "refs" : { + "JobError$ResourceType" : "The type of resource related to the error." + } + }, + "LimitName" : { + "base" : null, + "refs" : { + "ServiceQuotaExceededException$LimitName" : "The name of the quota that was exceeded.
" + } + }, + "ListOfAssetDestinationEntry" : { + "base" : "The destination where the assets will be exported.
", + "refs" : { + "ExportAssetsToS3RequestDetails$AssetDestinations" : "The destination for the asset.
", + "ExportAssetsToS3ResponseDetails$AssetDestinations" : "The destination in Amazon S3 where the asset is exported.
" + } + }, + "ListOfAssetSourceEntry" : { + "base" : "The list of sources for the assets.
", + "refs" : { + "Details$ImportAssetsFromS3JobErrorDetails" : null, + "ImportAssetsFromS3RequestDetails$AssetSources" : "Is a list of S3 bucket and object key pairs.
", + "ImportAssetsFromS3ResponseDetails$AssetSources" : "Is a list of Amazon S3 bucket and object key pairs.
" + } + }, + "ListOfAssets" : { + "base" : "The asset objects listed by the request.
", + "refs" : { } + }, + "ListOfDataSets" : { + "base" : "The data set objects listed by the request.
", + "refs" : { } + }, + "ListOfJobs" : { + "base" : "The token value retrieved from a previous call to access the next page of results.
", + "refs" : { } + }, + "ListOfRevisions" : { + "base" : "The revision objects listed by the request.
", + "refs" : { } + }, + "Name" : { + "base" : "The name of the model.", + "refs" : { + "CreateDataSetRequest$Name" : "The name of the data set.
", + "DataSet$Name" : "The name of the data set.
", + "DataSetEntry$Name" : "The name of the data set.
", + "TaggedDataSet$Name" : "The name of the data set.
", + "UpdateDataSetRequest$Name" : "The name of the data set.
" + } + }, + "NextToken" : { + "base" : "The token value retrieved from a previous call to access the next page of results.
", + "refs" : { + "ListOfAssets$NextToken" : "The token value retrieved from a previous call to access the next page of results.
", + "ListOfDataSets$NextToken" : "The token value retrieved from a previous call to access the next page of results.
", + "ListOfJobs$NextToken" : "The token value retrieved from a previous call to access the next page of results.
", + "ListOfRevisions$NextToken" : "The token value retrieved from a previous call to access the next page of results.
" + } + }, + "Origin" : { + "base" : "A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers). When an owned data set is published in a product, AWS Data Exchange creates a copy of the data set. Subscribers can access that copy of the data set as an entitled data set.
", + "refs" : { + "DataSet$Origin" : "A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).
", + "DataSetEntry$Origin" : "A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).
", + "TaggedDataSet$Origin" : "A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).
" + } + }, + "OriginDetails" : { + "base" : null, + "refs" : { + "DataSet$OriginDetails" : "If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.
", + "DataSetEntry$OriginDetails" : "If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.
", + "TaggedDataSet$OriginDetails" : "If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.
" + } + }, + "RequestDetails" : { + "base" : "The details for the request.
", + "refs" : { + "CreateJobRequest$Details" : "The details for the CreateJob request.
" + } + }, + "ResourceNotFoundException" : { + "base" : "The resource couldn't be found.
", + "refs" : { } + }, + "ResourceType" : { + "base" : null, + "refs" : { + "ConflictException$ResourceType" : "The type of the resource with the conflict.
", + "ResourceNotFoundException$ResourceType" : "The type of resource that couldn't be found.
" + } + }, + "ResponseDetails" : { + "base" : "Details for the response.
", + "refs" : { + "Job$Details" : "Details about the job.
", + "JobEntry$Details" : "Details of the operation to be performed by the job, such as export destination details or import source details.
" + } + }, + "Revision" : { + "base" : "A revision is a container for one or more assets.
", + "refs" : { } + }, + "RevisionEntry" : { + "base" : "A revision is a container for one or more assets.
", + "refs" : { + "ListOfRevisionEntry$member" : null + } + }, + "S3SnapshotAsset" : { + "base" : "The S3 object that is the asset.
", + "refs" : { + "AssetDetails$S3SnapshotAsset" : null + } + }, + "ServiceLimitExceededException" : { + "base" : "The request has exceeded the quotas imposed by the service.
", + "refs" : { } + }, + "ServiceQuotaExceededException" : { + "base" : "The request has exceeded the quotas imposed by the service.
", + "refs" : { } + }, + "State" : { + "base" : null, + "refs" : { + "Job$State" : "The state of the job.
", + "JobEntry$State" : "The state of the job.
" + } + }, + "TaggedDataSet" : { + "base" : "A data set is an AWS resource with one or more revisions.
", + "refs" : { } + }, + "TaggedRevision" : { + "base" : "A revision tag is an optional label that you can assign to a revision when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.
", + "refs" : { } + }, + "TagsModel" : { + "base" : "You can assign metadata to your AWS Data Exchange resources in the form of tags. Each tag is a label that consists of a customer-defined key and an optional value that can make it easier to manage, search for, and filter resources.", + "refs" : { } + }, + "ThrottlingException" : { + "base" : "The limit on the number of requests per second was exceeded.
", + "refs" : { } + }, + "Timestamp" : { + "base" : "Dates and times in AWS Data Exchange are recorded in ISO 8601 format.
", + "refs" : { + "Asset$CreatedAt" : "The date and time that the asset was created, in ISO 8601 format.
", + "Asset$UpdatedAt" : "The date and time that the asset was last updated, in ISO 8601 format.
", + "AssetEntry$CreatedAt" : "The date and time that the asset was created, in ISO 8601 format.
", + "AssetEntry$UpdatedAt" : "The date and time that the asset was last updated, in ISO 8601 format.
", + "DataSet$CreatedAt" : "The date and time that the data set was created, in ISO 8601 format.
", + "DataSet$UpdatedAt" : "The date and time that the data set was last updated, in ISO 8601 format.
", + "DataSetEntry$CreatedAt" : "The date and time that the data set was created, in ISO 8601 format.
", + "DataSetEntry$UpdatedAt" : "The date and time that the data set was last updated, in ISO 8601 format.
", + "ExportAssetToSignedUrlResponseDetails$SignedUrlExpiresAt" : "The date and time that the signed URL expires, in ISO 8601 format.
", + "ImportAssetFromSignedUrlResponseDetails$SignedUrlExpiresAt" : "The time and date at which the signed URL expires, in ISO 8601 format.
", + "Job$CreatedAt" : "The date and time that the job was created, in ISO 8601 format.
", + "Job$UpdatedAt" : "The date and time that the job was last updated, in ISO 8601 format.
", + "JobEntry$CreatedAt" : "The date and time that the job was created, in ISO 8601 format.
", + "JobEntry$UpdatedAt" : "The date and time that the job was last updated, in ISO 8601 format.
", + "Revision$CreatedAt" : "The date and time that the revision was created, in ISO 8601 format.
", + "Revision$UpdatedAt" : "The date and time that the revision was last updated, in ISO 8601 format.
", + "RevisionEntry$CreatedAt" : "The date and time that the revision was created, in ISO 8601 format.
", + "RevisionEntry$UpdatedAt" : "The date and time that the revision was last updated, in ISO 8601 format.
", + "TaggedDataSet$CreatedAt" : "The date and time that the data set was created, in ISO 8601 format.
", + "TaggedDataSet$UpdatedAt" : "The date and time that the data set was last updated, in ISO 8601 format.
", + "TaggedRevision$CreatedAt" : "The date and time that the revision was created, in ISO 8601 format.
", + "TaggedRevision$UpdatedAt" : "The date and time that the revision was last updated, in ISO 8601 format.
" + } + }, + "Type" : { + "base" : null, + "refs" : { + "CreateJobRequest$Type" : "The type of job to be created.
", + "Job$Type" : "The job type.
", + "JobEntry$Type" : "The job type.
" + } + }, + "UpdateAssetRequest" : { + "base" : "The request to update an asset.
", + "refs" : { } + }, + "UpdateDataSetRequest" : { + "base" : "The request to update a data set.
", + "refs" : { } + }, + "UpdateRevisionRequest" : { + "base" : "The request to update a revision.
", + "refs" : { } + }, + "ValidationException" : { + "base" : "The request was invalid.
", + "refs" : { } + }, + "__boolean" : { + "base" : null, + "refs" : { + "Revision$Finalized" : "To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.
Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.
", + "RevisionEntry$Finalized" : "To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.
Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.
", + "TaggedRevision$Finalized" : "To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.
Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.
", + "UpdateRevisionRequest$Finalized" : "Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.
" + } + }, + "__double" : { + "base" : null, + "refs" : { + "JobError$LimitValue" : "The value of the exceeded limit.", + "ServiceQuotaExceededException$LimitValue" : "The maximum value for the service-specific limit.
" + } + }, + "__doubleMin0" : { + "base" : null, + "refs" : { + "S3SnapshotAsset$Size" : "The size of the S3 object that is the object.
" + } + }, + "ListOfAssetEntry" : { + "base" : null, + "refs" : { + "ListOfAssets$Assets" : "The asset objects listed by the request.
" + } + }, + "ListOfDataSetEntry" : { + "base" : null, + "refs" : { + "ListOfDataSets$DataSets" : "The data set objects listed by the request.
" + } + }, + "ListOfJobEntry" : { + "base" : null, + "refs" : { + "ListOfJobs$Jobs" : "The jobs listed by the request.
" + } + }, + "ListOfJobError" : { + "base" : null, + "refs" : { + "Job$Errors" : "The errors associated with jobs.
", + "JobEntry$Errors" : "Errors for jobs.
" + } + }, + "ListOfRevisionEntry" : { + "base" : null, + "refs" : { + "ListOfRevisions$Revisions" : "The asset objects listed by the request.
" + } + }, + "MapOf__string" : { + "base" : null, + "refs" : { + "CreateDataSetRequest$Tags" : "A data set tag is an optional label that you can assign to a data set when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.
", + "CreateRevisionRequest$Tags" : "A revision tag is an optional label that you can assign to a revision when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.
", + "TaggedDataSet$Tags" : "The tags for the data set.
", + "TaggedRevision$Tags" : "The tags for the revision.
", + "TagsModel$Tags" : "A label that consists of a customer-defined key and an optional value." + } + }, + "__string" : { + "base" : null, + "refs" : { + "AccessDeniedException$Message" : "Access to the resource is denied.
", + "AssetDestinationEntry$Bucket" : "The S3 bucket that is the destination for the asset.
", + "AssetDestinationEntry$Key" : "The name of the object in Amazon S3 for the asset.
", + "AssetSourceEntry$Bucket" : "The S3 bucket that's part of the source of the asset.
", + "AssetSourceEntry$Key" : "The name of the object in Amazon S3 for the asset.
", + "ConflictException$Message" : "The request couldn't be completed because it conflicted with the current state of the resource.
", + "ConflictException$ResourceId" : "The unique identifier for the resource with the conflict.
", + "ExportAssetToSignedUrlResponseDetails$SignedUrl" : "The signed URL for the export request.
", + "ImportAssetFromSignedUrlResponseDetails$SignedUrl" : "The signed URL.
", + "InternalServerException$Message" : "The message identifying the service exception that occurred.", + "JobError$Message" : "The message related to the job error.", + "JobError$ResourceId" : "The unqiue identifier for the resource related to the error.", + "OriginDetails$ProductId" : null, + "ResourceNotFoundException$Message" : "The resource couldn't be found.
", + "ResourceNotFoundException$ResourceId" : "The unique identifier for the resource that couldn't be found.
", + "ServiceQuotaExceededException$Message" : "The request has exceeded the quotas imposed by the service.
", + "ThrottlingException$Message" : "The limit on the number of requests per second was exceeded.
", + "ValidationException$Message" : "The message that informs you about what was invalid about the request.
", + "MapOf__string$member" : null + } + }, + "__stringMin0Max16384" : { + "base" : null, + "refs" : { + "CreateRevisionRequest$Comment" : "An optional comment about the revision.
", + "Revision$Comment" : "An optional comment about the revision.
", + "RevisionEntry$Comment" : "An optional comment about the revision.
", + "TaggedRevision$Comment" : "An optional comment about the revision.
", + "UpdateRevisionRequest$Comment" : "An optional comment about the revision.
" + } + }, + "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093" : { + "base" : null, + "refs" : { + "ImportAssetFromSignedUrlRequestDetails$Md5Hash" : "The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.
", + "ImportAssetFromSignedUrlResponseDetails$Md5Hash" : "The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.
" + } + } + } +} \ No newline at end of file diff --git a/models/apis/dataexchange/2017-07-25/paginators-1.json b/models/apis/dataexchange/2017-07-25/paginators-1.json new file mode 100644 index 00000000000..d76ccf7f38b --- /dev/null +++ b/models/apis/dataexchange/2017-07-25/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListDataSetRevisions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Revisions" + }, + "ListDataSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DataSets" + }, + "ListJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Jobs" + }, + "ListRevisionAssets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Assets" + } + } +} \ No newline at end of file diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index a7c3db8efc0..0906e9121f7 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -542,6 +542,7 @@ "Name":{"shape":"TagValue"}, "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, + "Schedule":{"shape":"TaskSchedule"}, "Tags":{"shape":"TagList"} } }, @@ -720,6 +721,7 @@ "DestinationNetworkInterfaceArns":{"shape":"DestinationNetworkInterfaceArns"}, "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, + "Schedule":{"shape":"TaskSchedule"}, "ErrorCode":{"shape":"string"}, "ErrorDetail":{"shape":"string"}, "CreationTime":{"shape":"Time"} @@ -775,7 +777,8 @@ "type":"string", "enum":[ "PUBLIC", - "PRIVATE_LINK" + "PRIVATE_LINK", + "FIPS" ] }, "FilterList":{ @@ -972,7 +975,7 @@ "NonEmptySubdirectory":{ "type":"string", "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\./]+$" + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]+$" }, "OnPremConfig":{ "type":"structure", @@ -1028,7 
+1031,6 @@ "type":"string", "enum":[ "NONE", - "BEST_EFFORT", "PRESERVE" ] }, @@ -1078,6 +1080,11 @@ "DEEP_ARCHIVE" ] }, + "ScheduleExpressionCron":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\ \\_\\*\\?\\,\\|\\^\\-\\/\\#\\s\\(\\)\\+]*$" + }, "ServerHostname":{ "type":"string", "max":255, @@ -1097,12 +1104,13 @@ "SmbPassword":{ "type":"string", "max":104, - "pattern":"^.{0,104}$" + "pattern":"^.{0,104}$", + "sensitive":true }, "SmbUser":{ "type":"string", "max":104, - "pattern":"^[^\\\\x5B\\\\x5D\\\\/:;|=,+*?]{1,104}$" + "pattern":"^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$" }, "SmbVersion":{ "type":"string", @@ -1134,7 +1142,7 @@ "Subdirectory":{ "type":"string", "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\./]*$" + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$" }, "TagKey":{ "type":"string", @@ -1215,6 +1223,7 @@ "members":{ "PrepareDuration":{"shape":"Duration"}, "PrepareStatus":{"shape":"PhaseStatus"}, + "TotalDuration":{"shape":"Duration"}, "TransferDuration":{"shape":"Duration"}, "TransferStatus":{"shape":"PhaseStatus"}, "VerifyDuration":{"shape":"Duration"}, @@ -1254,6 +1263,13 @@ "DISABLED" ] }, + "TaskSchedule":{ + "type":"structure", + "required":["ScheduleExpression"], + "members":{ + "ScheduleExpression":{"shape":"ScheduleExpressionCron"} + } + }, "TaskStatus":{ "type":"string", "enum":[ @@ -1310,6 +1326,7 @@ "TaskArn":{"shape":"TaskArn"}, "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, + "Schedule":{"shape":"TaskSchedule"}, "Name":{"shape":"TagValue"}, "CloudWatchLogGroupArn":{"shape":"LogGroupArn"} } diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index 2afac0729d2..73231a0ec73 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -7,7 +7,7 @@ "CreateLocationEfs": "Creates an endpoint for an Amazon EFS file system.
", "CreateLocationNfs": "Defines a file system on a Network File System (NFS) server that can be read from or written to
", "CreateLocationS3": "Creates an endpoint for an Amazon S3 bucket.
For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section.
For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location in the AWS DataSync User Guide.
", - "CreateLocationSmb": "Defines a file system on an Server Message Block (SMB) server that can be read from or written to
", + "CreateLocationSmb": "Defines a file system on a Server Message Block (SMB) server that can be read from or written to.
", "CreateTask": "Creates a task. A task is a set of two locations (source and destination) and a set of Options that you use to control the behavior of a task. If you don't specify Options when you create a task, AWS DataSync populates them with service defaults.
When you create a task, it first enters the CREATING state. During CREATING AWS DataSync attempts to mount the on-premises Network File System (NFS) location. The task transitions to the AVAILABLE state without waiting for the AWS location to become mounted. If required, AWS DataSync mounts the AWS location before each task execution.
If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the status of the task remains in the CREATING status for more than a few minutes, it means that your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail. Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server host name.
", "DeleteAgent": "Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. The operation disassociates the agent from your AWS account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.
", "DeleteLocation": "Deletes the configuration of a location used by AWS DataSync.
", @@ -268,6 +268,7 @@ "base": null, "refs": { "TaskExecutionResultDetail$PrepareDuration": "The total time in milliseconds that AWS DataSync spent in the PREPARING phase.
", + "TaskExecutionResultDetail$TotalDuration": "The total time in milliseconds that AWS DataSync took to transfer the file from the source to the destination location.
", "TaskExecutionResultDetail$TransferDuration": "The total time in milliseconds that AWS DataSync spent in the TRANSFERRING phase.
", "TaskExecutionResultDetail$VerifyDuration": "The total time in milliseconds that AWS DataSync spent in the VERIFYING phase.
" } @@ -526,7 +527,7 @@ "base": null, "refs": { "CreateLocationNfsRequest$Subdirectory": "The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name
\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash,
or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
", - "CreateLocationSmbRequest$Subdirectory": "The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.
To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.
" + "CreateLocationSmbRequest$Subdirectory": "The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.
Subdirectory
must be specified with forward slashes. For example /path/to/folder
.
To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.
" } }, "OnPremConfig": { @@ -618,6 +619,12 @@ "DescribeLocationS3Response$S3StorageClass": "The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.
" } }, + "ScheduleExpressionCron": { + "base": null, + "refs": { + "TaskSchedule$ScheduleExpression": "A cron expression that specifies when AWS DataSync initiates a scheduled transfer from a source to a destination location.
" + } + }, "ServerHostname": { "base": null, "refs": { @@ -677,7 +684,7 @@ "Subdirectory": { "base": null, "refs": { - "CreateLocationEfsRequest$Subdirectory": "A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.
", + "CreateLocationEfsRequest$Subdirectory": "A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.
Subdirectory
must be specified with forward slashes. For example /path/to/folder
.
A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is used to read data from the S3 source location or write data to the S3 destination.
" } }, @@ -809,7 +816,15 @@ "TaskQueueing": { "base": null, "refs": { - "Options$TaskQueueing": "A value that determines whether tasks should be queued before executing the tasks. If set to Enabled
, the tasks will queued. The default is Enabled
.
If you use the same agent to run multiple tasks you can enable the tasks to run in series. For more information see task-queue.
" + "Options$TaskQueueing": "A value that determines whether tasks should be queued before executing the tasks. If set to ENABLED
, the tasks will be queued. The default is ENABLED
.
If you use the same agent to run multiple tasks, you can enable the tasks to run in series. For more information, see queue-task-execution.
" + } + }, + "TaskSchedule": { + "base": "Specifies the schedule you want your task to use for repeated executions. For more information, see Schedule Expressions for Rules.
", + "refs": { + "CreateTaskRequest$Schedule": "Specifies a schedule used to periodically transfer files from a source to a destination location. The schedule should be specified in UTC time. For more information, see task-scheduling.
", + "DescribeTaskResponse$Schedule": "The schedule used to periodically transfer files from a source to a destination location.
", + "UpdateTaskRequest$Schedule": "Specifies a schedule used to periodically transfer files from a source to a destination location. You can configure your task to execute hourly, daily, weekly or on specific days of the week. You control when in the day or hour you want the task to execute. The time you specify is UTC time. For more information, see task-scheduling.
" } }, "TaskStatus": { diff --git a/models/apis/discovery/2015-11-01/api-2.json b/models/apis/discovery/2015-11-01/api-2.json index 5197cf8631c..3f12a99c747 100644 --- a/models/apis/discovery/2015-11-01/api-2.json +++ b/models/apis/discovery/2015-11-01/api-2.json @@ -24,7 +24,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "BatchDeleteImportData":{ @@ -37,8 +38,10 @@ "output":{"shape":"BatchDeleteImportDataResponse"}, "errors":[ {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "CreateApplication":{ @@ -53,7 +56,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "CreateTags":{ @@ -69,7 +73,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DeleteApplications":{ @@ -84,7 +89,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DeleteTags":{ @@ -100,7 +106,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + 
{"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeAgents":{ @@ -115,7 +122,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeConfigurations":{ @@ -130,7 +138,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeContinuousExports":{ @@ -147,7 +156,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, {"shape":"OperationNotPermittedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeExportConfigurations":{ @@ -163,7 +173,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ], "deprecated":true }, @@ -179,7 +190,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeImportTasks":{ @@ -192,8 +204,10 @@ "output":{"shape":"DescribeImportTasksResponse"}, "errors":[ {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeTags":{ @@ -209,7 +223,8 @@ 
{"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DisassociateConfigurationItemsFromApplication":{ @@ -224,7 +239,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ExportConfigurations":{ @@ -239,7 +255,8 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"HomeRegionNotSetException"} ], "deprecated":true }, @@ -255,7 +272,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListConfigurations":{ @@ -271,7 +289,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListServerNeighbors":{ @@ -286,7 +305,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartContinuousExport":{ @@ -304,7 +324,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, {"shape":"OperationNotPermittedException"}, - {"shape":"ResourceInUseException"} + 
{"shape":"ResourceInUseException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartDataCollectionByAgentIds":{ @@ -319,7 +340,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartExportTask":{ @@ -335,7 +357,8 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartImportTask":{ @@ -349,8 +372,10 @@ "errors":[ {"shape":"ResourceInUseException"}, {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StopContinuousExport":{ @@ -368,7 +393,8 @@ {"shape":"ServerInternalErrorException"}, {"shape":"OperationNotPermittedException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ResourceInUseException"} + {"shape":"ResourceInUseException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StopDataCollectionByAgentIds":{ @@ -383,7 +409,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "UpdateApplication":{ @@ -398,7 +425,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] } }, @@ -975,6 +1003,13 @@ 
"connectorSummary":{"shape":"CustomerConnectorInfo"} } }, + "HomeRegionNotSetException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, "ImportStatus":{ "type":"string", "enum":[ diff --git a/models/apis/discovery/2015-11-01/docs-2.json b/models/apis/discovery/2015-11-01/docs-2.json index 123fe8c80f0..aef504ac6e5 100644 --- a/models/apis/discovery/2015-11-01/docs-2.json +++ b/models/apis/discovery/2015-11-01/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.
Application Discovery Service offers two modes of operation:
Agentless discovery mode is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment. Agentless discovery doesn't collect information about software and software dependencies. It also doesn't work in non-VMware environments.
Agent-based discovery mode collects a richer set of data than agentless discovery by using the AWS Application Discovery Agent, which you install on one or more hosts in your data center. The agent captures infrastructure and application information, including an inventory of installed software applications, system and process performance, resource utilization, and network dependencies between workloads. The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud.
We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.
Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.
Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.
This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.
This guide is intended for use with the AWS Application Discovery Service User Guide .
", + "service": "AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.
Application Discovery Service offers two modes of operation:
Agentless discovery mode is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment. Agentless discovery doesn't collect information about software and software dependencies. It also doesn't work in non-VMware environments.
Agent-based discovery mode collects a richer set of data than agentless discovery by using the AWS Application Discovery Agent, which you install on one or more hosts in your data center. The agent captures infrastructure and application information, including an inventory of installed software applications, system and process performance, resource utilization, and network dependencies between workloads. The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud.
We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.
Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.
Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.
This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.
This guide is intended for use with the AWS Application Discovery Service User Guide .
Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException
error will be returned. Also, you must make the API calls while in your home region.
Associates one or more configuration items with an application.
", "BatchDeleteImportData": "Deletes one or more import tasks, each identified by their import ID. Each import task has a number of records that can identify servers or applications.
AWS Application Discovery Service has built-in matching logic that will identify when discovered servers match existing entries that you've previously discovered, the information for the already-existing discovered server is updated. When you delete an import task that contains records that were used to match, the information in those matched records that comes from the deleted records will also be deleted.
", @@ -487,7 +487,7 @@ "ExportIds": { "base": null, "refs": { - "DescribeExportConfigurationsRequest$exportIds": "A list of continuous export ids to search for.
", + "DescribeExportConfigurationsRequest$exportIds": "A list of continuous export IDs to search for.
", "DescribeExportTasksRequest$exportIds": "One or more unique identifiers used to query the status of an export request.
" } }, @@ -566,6 +566,11 @@ "refs": { } }, + "HomeRegionNotSetException": { + "base": "The home region is not set. Set the home region to continue.
", + "refs": { + } + }, "ImportStatus": { "base": null, "refs": { @@ -706,6 +711,7 @@ "refs": { "AuthorizationErrorException$message": null, "ConflictErrorException$message": null, + "HomeRegionNotSetException$message": null, "InvalidParameterException$message": null, "InvalidParameterValueException$message": null, "OperationNotPermittedException$message": null, diff --git a/models/apis/dlm/2018-01-12/api-2.json b/models/apis/dlm/2018-01-12/api-2.json index d5a1dc46da3..daed8661092 100644 --- a/models/apis/dlm/2018-01-12/api-2.json +++ b/models/apis/dlm/2018-01-12/api-2.json @@ -70,6 +70,48 @@ {"shape":"LimitExceededException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "UpdateLifecyclePolicy":{ "name":"UpdateLifecyclePolicy", "http":{ @@ -87,6 +129,18 @@ } }, "shapes":{ + "AvailabilityZone":{ + "type":"string", + "max":16, + "min":0, + "pattern":"([a-z]+-){2,3}\\d[a-z]" + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{"shape":"AvailabilityZone"}, + 
"max":10, + "min":1 + }, "CopyTags":{"type":"boolean"}, "Count":{ "type":"integer", @@ -105,7 +159,8 @@ "ExecutionRoleArn":{"shape":"ExecutionRoleArn"}, "Description":{"shape":"PolicyDescription"}, "State":{"shape":"SettablePolicyStateValues"}, - "PolicyDetails":{"shape":"PolicyDetails"} + "PolicyDetails":{"shape":"PolicyDetails"}, + "Tags":{"shape":"TagMap"} } }, "CreateLifecyclePolicyResponse":{ @@ -145,7 +200,23 @@ "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, "ExcludeBootVolume":{"type":"boolean"}, - "ExecutionRoleArn":{"type":"string"}, + "ExecutionRoleArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"arn:aws:iam::\\d+:role/.*" + }, + "FastRestoreRule":{ + "type":"structure", + "required":[ + "Count", + "AvailabilityZones" + ], + "members":{ + "Count":{"shape":"Count"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"} + } + }, "GetLifecyclePoliciesRequest":{ "type":"structure", "members":{ @@ -241,10 +312,13 @@ "PolicyId":{"shape":"PolicyId"}, "Description":{"shape":"PolicyDescription"}, "State":{"shape":"GettablePolicyStateValues"}, + "StatusMessage":{"shape":"StatusMessage"}, "ExecutionRoleArn":{"shape":"ExecutionRoleArn"}, "DateCreated":{"shape":"Timestamp"}, "DateModified":{"shape":"Timestamp"}, - "PolicyDetails":{"shape":"PolicyDetails"} + "PolicyDetails":{"shape":"PolicyDetails"}, + "Tags":{"shape":"TagMap"}, + "PolicyArn":{"shape":"PolicyArn"} } }, "LifecyclePolicySummary":{ @@ -252,7 +326,8 @@ "members":{ "PolicyId":{"shape":"PolicyId"}, "Description":{"shape":"PolicyDescription"}, - "State":{"shape":"GettablePolicyStateValues"} + "State":{"shape":"GettablePolicyStateValues"}, + "Tags":{"shape":"TagMap"} } }, "LifecyclePolicySummaryList":{ @@ -269,6 +344,23 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"PolicyArn", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + 
"ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagMap"} + } + }, "Parameter":{"type":"string"}, "ParameterList":{ "type":"list", @@ -280,10 +372,15 @@ "ExcludeBootVolume":{"shape":"ExcludeBootVolume"} } }, + "PolicyArn":{ + "type":"string", + "pattern":"^arn:aws:dlm:[A-Za-z0-9_/.-]{0,63}:\\d+:policy/[0-9A-Za-z_-]{1,128}$" + }, "PolicyDescription":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"[0-9A-Za-z _-]+" }, "PolicyDetails":{ "type":"structure", @@ -295,7 +392,12 @@ "Parameters":{"shape":"Parameters"} } }, - "PolicyId":{"type":"string"}, + "PolicyId":{ + "type":"string", + "max":64, + "min":0, + "pattern":"policy-[A-Za-z0-9]+" + }, "PolicyIdList":{ "type":"list", "member":{"shape":"PolicyId"} @@ -343,7 +445,8 @@ "TagsToAdd":{"shape":"TagsToAddList"}, "VariableTags":{"shape":"VariableTagsList"}, "CreateRule":{"shape":"CreateRule"}, - "RetainRule":{"shape":"RetainRule"} + "RetainRule":{"shape":"RetainRule"}, + "FastRestoreRule":{"shape":"FastRestoreRule"} } }, "ScheduleList":{ @@ -355,7 +458,8 @@ "ScheduleName":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"[\\p{all}]*" }, "SettablePolicyStateValues":{ "type":"string", @@ -364,7 +468,18 @@ "DISABLED" ] }, - "String":{"type":"string"}, + "StatusMessage":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[\\p{all}]*" + }, + "String":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[\\p{all}]*" + }, "Tag":{ "type":"structure", "required":[ @@ -376,7 +491,55 @@ "Value":{"shape":"String"} } }, - "TagFilter":{"type":"string"}, + "TagFilter":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\p{all}]*" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + 
"TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"PolicyArn", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{"shape":"TagMap"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "TagsToAddFilterList":{ "type":"list", "member":{"shape":"TagFilter"}, @@ -403,7 +566,9 @@ }, "Time":{ "type":"string", - "pattern":"^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" + "max":5, + "min":5, + "pattern":"^(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" }, "TimesList":{ "type":"list", @@ -414,6 +579,30 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"PolicyArn", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLifecyclePolicyRequest":{ "type":"structure", "required":["PolicyId"], diff --git a/models/apis/dlm/2018-01-12/docs-2.json b/models/apis/dlm/2018-01-12/docs-2.json index e66c6d48e33..0d0992dc412 100644 --- a/models/apis/dlm/2018-01-12/docs-2.json +++ b/models/apis/dlm/2018-01-12/docs-2.json @@ -6,9 +6,24 @@ "DeleteLifecyclePolicy": "Deletes the specified lifecycle policy and halts the automated operations that the policy specified.
", "GetLifecyclePolicies": "Gets summary information about all or the specified data lifecycle policies.
To get complete information about a policy, use GetLifecyclePolicy.
", "GetLifecyclePolicy": "Gets detailed information about the specified lifecycle policy.
", + "ListTagsForResource": "Lists the tags for the specified resource.
", + "TagResource": "Adds the specified tags to the specified resource.
", + "UntagResource": "Removes the specified tags from the specified resource.
", "UpdateLifecyclePolicy": "Updates the specified lifecycle policy.
" }, "shapes": { + "AvailabilityZone": { + "base": null, + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "FastRestoreRule$AvailabilityZones": "The Availability Zones in which to enable fast snapshot restore.
" + } + }, "CopyTags": { "base": null, "refs": { @@ -18,6 +33,7 @@ "Count": { "base": null, "refs": { + "FastRestoreRule$Count": "The number of snapshots to be enabled with fast snapshot restore.
", "RetainRule$Count": "The number of snapshots to keep for each volume, up to a maximum of 1000.
" } }, @@ -79,6 +95,12 @@ "UpdateLifecyclePolicyRequest$ExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.
" } }, + "FastRestoreRule": { + "base": "Specifies when to enable fast snapshot restore.
", + "refs": { + "Schedule$FastRestoreRule": "Enable fast snapshot restore.
" + } + }, "GetLifecyclePoliciesRequest": { "base": null, "refs": { @@ -152,6 +174,16 @@ "refs": { } }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "Parameter": { "base": null, "refs": { @@ -171,6 +203,15 @@ "PolicyDetails$Parameters": "A set of optional parameters that can be provided by the policy.
" } }, + "PolicyArn": { + "base": null, + "refs": { + "LifecyclePolicy$PolicyArn": "The Amazon Resource Name (ARN) of the policy.
", + "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource.
", + "TagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource.
", + "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource.
" + } + }, "PolicyDescription": { "base": null, "refs": { @@ -183,9 +224,9 @@ "PolicyDetails": { "base": "Specifies the configuration of a lifecycle policy.
", "refs": { - "CreateLifecyclePolicyRequest$PolicyDetails": "The configuration details of the lifecycle policy.
Target tags cannot be re-used across lifecycle policies.
", + "CreateLifecyclePolicyRequest$PolicyDetails": "The configuration details of the lifecycle policy.
", "LifecyclePolicy$PolicyDetails": "The configuration of the lifecycle policy
", - "UpdateLifecyclePolicyRequest$PolicyDetails": "The configuration of the lifecycle policy.
Target tags cannot be re-used across policies.
" + "UpdateLifecyclePolicyRequest$PolicyDetails": "The configuration of the lifecycle policy. You cannot update the policy type or the resource type.
" } }, "PolicyId": { @@ -262,6 +303,12 @@ "UpdateLifecyclePolicyRequest$State": "The desired activation state of the lifecycle policy after creation.
" } }, + "StatusMessage": { + "base": null, + "refs": { + "LifecyclePolicy$StatusMessage": "The description of the status.
" + } + }, "String": { "base": null, "refs": { @@ -286,6 +333,45 @@ "TargetTagsFilterList$member": null } }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "The tag keys.
" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateLifecyclePolicyRequest$Tags": "The tags to apply to the lifecycle policy during creation.
", + "LifecyclePolicy$Tags": "The tags.
", + "LifecyclePolicySummary$Tags": "The tags.
", + "ListTagsForResourceResponse$Tags": "Information about the tags.
", + "TagResourceRequest$Tags": "One or more tags.
" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, "TagsToAddFilterList": { "base": null, "refs": { @@ -329,6 +415,16 @@ "LifecyclePolicy$DateModified": "The local date and time when the lifecycle policy was last modified.
" } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, "UpdateLifecyclePolicyRequest": { "base": null, "refs": { diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index b28bd1ab00a..29830f6d43d 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -1358,6 +1358,15 @@ "input":{"shape":"DescribeExportTasksRequest"}, "output":{"shape":"DescribeExportTasksResult"} }, + "DescribeFastSnapshotRestores":{ + "name":"DescribeFastSnapshotRestores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFastSnapshotRestoresRequest"}, + "output":{"shape":"DescribeFastSnapshotRestoresResult"} + }, "DescribeFleetHistory":{ "name":"DescribeFleetHistory", "http":{ @@ -2120,6 +2129,15 @@ "input":{"shape":"DisableEbsEncryptionByDefaultRequest"}, "output":{"shape":"DisableEbsEncryptionByDefaultResult"} }, + "DisableFastSnapshotRestores":{ + "name":"DisableFastSnapshotRestores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableFastSnapshotRestoresRequest"}, + "output":{"shape":"DisableFastSnapshotRestoresResult"} + }, "DisableTransitGatewayRouteTablePropagation":{ "name":"DisableTransitGatewayRouteTablePropagation", "http":{ @@ -2225,6 +2243,15 @@ "input":{"shape":"EnableEbsEncryptionByDefaultRequest"}, "output":{"shape":"EnableEbsEncryptionByDefaultResult"} }, + "EnableFastSnapshotRestores":{ + "name":"EnableFastSnapshotRestores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableFastSnapshotRestoresRequest"}, + "output":{"shape":"EnableFastSnapshotRestoresResult"} + }, "EnableTransitGatewayRouteTablePropagation":{ "name":"EnableTransitGatewayRouteTablePropagation", "http":{ @@ -2579,6 +2606,15 @@ "input":{"shape":"ModifyInstanceEventStartTimeRequest"}, "output":{"shape":"ModifyInstanceEventStartTimeResult"} }, + 
"ModifyInstanceMetadataOptions":{ + "name":"ModifyInstanceMetadataOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceMetadataOptionsRequest"}, + "output":{"shape":"ModifyInstanceMetadataOptionsResult"} + }, "ModifyInstancePlacement":{ "name":"ModifyInstancePlacement", "http":{ @@ -4293,6 +4329,13 @@ "unavailable" ] }, + "AvailabilityZoneStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, "AvailableCapacity":{ "type":"structure", "members":{ @@ -5634,7 +5677,7 @@ "locationName":"encrypted" }, "KmsKeyId":{ - "shape":"KmsKeyId", + "shape":"String", "locationName":"kmsKeyId" }, "PresignedUrl":{ @@ -5643,6 +5686,10 @@ }, "SourceRegion":{"shape":"String"}, "SourceSnapshotId":{"shape":"String"}, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "locationName":"dryRun" @@ -5655,6 +5702,10 @@ "SnapshotId":{ "shape":"String", "locationName":"snapshotId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" } } }, @@ -5805,6 +5856,7 @@ }, "CertificateArn":{"shape":"String"}, "Type":{"shape":"GatewayType"}, + "DeviceName":{"shape":"String"}, "DryRun":{ "shape":"Boolean", "locationName":"dryRun" @@ -7217,6 +7269,10 @@ "shape":"String", "locationName":"type" }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, "Tags":{ "shape":"TagList", "locationName":"tagSet" @@ -8760,6 +8816,92 @@ } } }, + "DescribeFastSnapshotRestoreSuccessItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"FastSnapshotRestoreStateCode", + "locationName":"state" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"stateTransitionReason" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + 
"OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "EnablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"enablingTime" + }, + "OptimizingTime":{ + "shape":"MillisecondDateTime", + "locationName":"optimizingTime" + }, + "EnabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"enabledTime" + }, + "DisablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"disablingTime" + }, + "DisabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"disabledTime" + } + } + }, + "DescribeFastSnapshotRestoreSuccessSet":{ + "type":"list", + "member":{ + "shape":"DescribeFastSnapshotRestoreSuccessItem", + "locationName":"item" + } + }, + "DescribeFastSnapshotRestoresMaxResults":{ + "type":"integer", + "max":200, + "min":0 + }, + "DescribeFastSnapshotRestoresRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"DescribeFastSnapshotRestoresMaxResults"}, + "NextToken":{"shape":"NextToken"}, + "DryRun":{"shape":"Boolean"} + } + }, + "DescribeFastSnapshotRestoresResult":{ + "type":"structure", + "members":{ + "FastSnapshotRestores":{ + "shape":"DescribeFastSnapshotRestoreSuccessSet", + "locationName":"fastSnapshotRestoreSet" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, "DescribeFleetError":{ "type":"structure", "members":{ @@ -11519,6 +11661,146 @@ } } }, + "DisableFastSnapshotRestoreErrorItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "FastSnapshotRestoreStateErrors":{ + "shape":"DisableFastSnapshotRestoreStateErrorSet", + "locationName":"fastSnapshotRestoreStateErrorSet" + } + } + }, + "DisableFastSnapshotRestoreErrorSet":{ + "type":"list", + "member":{ + "shape":"DisableFastSnapshotRestoreErrorItem", + "locationName":"item" + } + }, + "DisableFastSnapshotRestoreStateError":{ + "type":"structure", + "members":{ + "Code":{ + 
"shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "DisableFastSnapshotRestoreStateErrorItem":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Error":{ + "shape":"DisableFastSnapshotRestoreStateError", + "locationName":"error" + } + } + }, + "DisableFastSnapshotRestoreStateErrorSet":{ + "type":"list", + "member":{ + "shape":"DisableFastSnapshotRestoreStateErrorItem", + "locationName":"item" + } + }, + "DisableFastSnapshotRestoreSuccessItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"FastSnapshotRestoreStateCode", + "locationName":"state" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"stateTransitionReason" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "EnablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"enablingTime" + }, + "OptimizingTime":{ + "shape":"MillisecondDateTime", + "locationName":"optimizingTime" + }, + "EnabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"enabledTime" + }, + "DisablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"disablingTime" + }, + "DisabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"disabledTime" + } + } + }, + "DisableFastSnapshotRestoreSuccessSet":{ + "type":"list", + "member":{ + "shape":"DisableFastSnapshotRestoreSuccessItem", + "locationName":"item" + } + }, + "DisableFastSnapshotRestoresRequest":{ + "type":"structure", + "required":[ + "AvailabilityZones", + "SourceSnapshotIds" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneStringList", + "locationName":"AvailabilityZone" + }, + "SourceSnapshotIds":{ + 
"shape":"SnapshotIdStringList", + "locationName":"SourceSnapshotId" + }, + "DryRun":{"shape":"Boolean"} + } + }, + "DisableFastSnapshotRestoresResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"DisableFastSnapshotRestoreSuccessSet", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"DisableFastSnapshotRestoreErrorSet", + "locationName":"unsuccessful" + } + } + }, "DisableTransitGatewayRouteTablePropagationRequest":{ "type":"structure", "required":[ @@ -12121,6 +12403,146 @@ } } }, + "EnableFastSnapshotRestoreErrorItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "FastSnapshotRestoreStateErrors":{ + "shape":"EnableFastSnapshotRestoreStateErrorSet", + "locationName":"fastSnapshotRestoreStateErrorSet" + } + } + }, + "EnableFastSnapshotRestoreErrorSet":{ + "type":"list", + "member":{ + "shape":"EnableFastSnapshotRestoreErrorItem", + "locationName":"item" + } + }, + "EnableFastSnapshotRestoreStateError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "EnableFastSnapshotRestoreStateErrorItem":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Error":{ + "shape":"EnableFastSnapshotRestoreStateError", + "locationName":"error" + } + } + }, + "EnableFastSnapshotRestoreStateErrorSet":{ + "type":"list", + "member":{ + "shape":"EnableFastSnapshotRestoreStateErrorItem", + "locationName":"item" + } + }, + "EnableFastSnapshotRestoreSuccessItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"FastSnapshotRestoreStateCode", + "locationName":"state" + }, + "StateTransitionReason":{ + "shape":"String", + 
"locationName":"stateTransitionReason" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "EnablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"enablingTime" + }, + "OptimizingTime":{ + "shape":"MillisecondDateTime", + "locationName":"optimizingTime" + }, + "EnabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"enabledTime" + }, + "DisablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"disablingTime" + }, + "DisabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"disabledTime" + } + } + }, + "EnableFastSnapshotRestoreSuccessSet":{ + "type":"list", + "member":{ + "shape":"EnableFastSnapshotRestoreSuccessItem", + "locationName":"item" + } + }, + "EnableFastSnapshotRestoresRequest":{ + "type":"structure", + "required":[ + "AvailabilityZones", + "SourceSnapshotIds" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneStringList", + "locationName":"AvailabilityZone" + }, + "SourceSnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SourceSnapshotId" + }, + "DryRun":{"shape":"Boolean"} + } + }, + "EnableFastSnapshotRestoresResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"EnableFastSnapshotRestoreSuccessSet", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"EnableFastSnapshotRestoreErrorSet", + "locationName":"unsuccessful" + } + } + }, "EnableTransitGatewayRouteTablePropagationRequest":{ "type":"structure", "required":[ @@ -12584,6 +13006,16 @@ "locationName":"item" } }, + "FastSnapshotRestoreStateCode":{ + "type":"string", + "enum":[ + "enabling", + "optimizing", + "enabled", + "disabling", + "disabled" + ] + }, "Filter":{ "type":"structure", "members":{ @@ -13767,6 +14199,13 @@ "host" ] }, + "HttpTokensState":{ + "type":"string", + "enum":[ + "optional", + "required" + ] + }, "HypervisorType":{ "type":"string", "enum":[ @@ -14759,6 +15198,10 @@ "Licenses":{ 
"shape":"LicenseList", "locationName":"licenseSet" + }, + "MetadataOptions":{ + "shape":"InstanceMetadataOptionsResponse", + "locationName":"metadataOptions" } } }, @@ -15080,6 +15523,49 @@ "targeted" ] }, + "InstanceMetadataEndpointState":{ + "type":"string", + "enum":[ + "disabled", + "enabled" + ] + }, + "InstanceMetadataOptionsRequest":{ + "type":"structure", + "members":{ + "HttpTokens":{"shape":"HttpTokensState"}, + "HttpPutResponseHopLimit":{"shape":"Integer"}, + "HttpEndpoint":{"shape":"InstanceMetadataEndpointState"} + } + }, + "InstanceMetadataOptionsResponse":{ + "type":"structure", + "members":{ + "State":{ + "shape":"InstanceMetadataOptionsState", + "locationName":"state" + }, + "HttpTokens":{ + "shape":"HttpTokensState", + "locationName":"httpTokens" + }, + "HttpPutResponseHopLimit":{ + "shape":"Integer", + "locationName":"httpPutResponseHopLimit" + }, + "HttpEndpoint":{ + "shape":"InstanceMetadataEndpointState", + "locationName":"httpEndpoint" + } + } + }, + "InstanceMetadataOptionsState":{ + "type":"string", + "enum":[ + "pending", + "applied" + ] + }, "InstanceMonitoring":{ "type":"structure", "members":{ @@ -17189,6 +17675,30 @@ } } }, + "ModifyInstanceMetadataOptionsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "HttpTokens":{"shape":"HttpTokensState"}, + "HttpPutResponseHopLimit":{"shape":"Integer"}, + "HttpEndpoint":{"shape":"InstanceMetadataEndpointState"}, + "DryRun":{"shape":"Boolean"} + } + }, + "ModifyInstanceMetadataOptionsResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceMetadataOptions":{ + "shape":"InstanceMetadataOptionsResponse", + "locationName":"instanceMetadataOptions" + } + } + }, "ModifyInstancePlacementRequest":{ "type":"structure", "required":["InstanceId"], @@ -21180,7 +21690,8 @@ "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", 
"locationName":"LicenseSpecification" - } + }, + "MetadataOptions":{"shape":"InstanceMetadataOptionsRequest"} } }, "RunScheduledInstancesRequest":{ @@ -24586,6 +25097,10 @@ "VolumeType":{ "shape":"VolumeType", "locationName":"volumeType" + }, + "FastRestored":{ + "shape":"Boolean", + "locationName":"fastRestored" } } }, diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 9b37c6bd719..11257145ca7 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -44,7 +44,7 @@ "CreateCapacityReservation": "Creates a new Capacity Reservation with the specified attributes.
Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This gives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. By creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. For more information, see Capacity Reservations in the Amazon Elastic Compute Cloud User Guide.
Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to fulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try again at a later time, try in a different Availability Zone, or request a smaller capacity reservation. If your application is flexible across instance types and sizes, try to create a Capacity Reservation with different instance attributes.
Your request could also fail if the requested quantity exceeds your On-Demand Instance limit for the selected instance type. If your request fails due to limit constraints, increase your On-Demand Instance limit for the required instance type and try again. For more information about increasing your instance limits, see Amazon EC2 Service Limits in the Amazon Elastic Compute Cloud User Guide.
", "CreateClientVpnEndpoint": "Creates a Client VPN endpoint. A Client VPN endpoint is the resource you create and configure to enable and manage client VPN sessions. It is the destination endpoint at which all client VPN sessions are terminated.
", "CreateClientVpnRoute": "Adds a route to a network to a Client VPN endpoint. Each Client VPN endpoint has a route table that describes the available destination network routes. Each route in the route table specifies the path for traffic to specific resources or networks.
", - "CreateCustomerGateway": "Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).
For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).
Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1
Region, and 9059, which is reserved in the eu-west-1
Region.
For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.
You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.
Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).
For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).
Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1
Region, and 9059, which is reserved in the eu-west-1
Region.
For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.
To create more than one customer gateway with the same VPN type, IP address, and BGP ASN, specify a unique device name for each customer gateway. Identical requests return information about the existing customer gateway and do not create new customer gateways.
Creates a default subnet with a size /20
IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.
Creates a default VPC with a size /16
IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.
If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.
If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.
", "CreateDhcpOptions": "Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.
domain-name-servers
- The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name
, you must set domain-name-servers
to a custom DNS server.
domain-name
- If you're using AmazonProvidedDNS in us-east-1
, specify ec2.internal
. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal
(for example, ap-northeast-1.compute.internal
). Otherwise, specify a domain name (for example, MyCompany.com
). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.
ntp-servers
- The IP addresses of up to four Network Time Protocol (NTP) servers.
netbios-name-servers
- The IP addresses of up to four NetBIOS name servers.
netbios-node-type
- The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.
Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers
option either to AmazonProvidedDNS
or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.
", "DescribeExportImageTasks": "Describes the specified export image tasks or all your export image tasks.
", "DescribeExportTasks": "Describes the specified export instance tasks or all your export instance tasks.
", + "DescribeFastSnapshotRestores": "Describes the state of fast snapshot restores for your snapshots.
", "DescribeFleetHistory": "Describes the events for the specified EC2 Fleet during the specified time.
", "DescribeFleetInstances": "Describes the running instances for the specified EC2 Fleet.
", "DescribeFleets": "Describes the specified EC2 Fleets or all your EC2 Fleets.
", @@ -240,6 +241,7 @@ "DetachVolume": "Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy
state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.
When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.
For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.
", "DetachVpnGateway": "Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).
You must wait for the attachment's state to switch to detached
before you can delete the VPC or attach a different VPC to the virtual private gateway.
Disables EBS encryption by default for your account in the current Region.
After you disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume.
Disabling encryption by default does not change the encryption status of your existing volumes.
For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
", + "DisableFastSnapshotRestores": "Disables fast snapshot restores for the specified snapshots in the specified Availability Zones.
", "DisableTransitGatewayRouteTablePropagation": "Disables the specified resource attachment from propagating routes to the specified propagation route table.
", "DisableVgwRoutePropagation": "Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.
", "DisableVpcClassicLink": "Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.
", @@ -252,6 +254,7 @@ "DisassociateTransitGatewayRouteTable": "Disassociates a resource attachment from a transit gateway route table.
", "DisassociateVpcCidrBlock": "Disassociates a CIDR block from a VPC. To disassociate the CIDR block, you must specify its association ID. You can get the association ID by using DescribeVpcs. You must detach or delete all gateways and resources that are associated with the CIDR block before you can disassociate it.
You cannot disassociate the CIDR block with which you originally created the VPC (the primary CIDR block).
", "EnableEbsEncryptionByDefault": "Enables EBS encryption by default for your account in the current Region.
After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.
Enabling encryption by default has no effect on the encryption status of your existing volumes.
After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported Instance Types.
", + "EnableFastSnapshotRestores": "Enables fast snapshot restores for the specified snapshots in the specified Availability Zones.
You get the full benefit of fast snapshot restores after they enter the enabled
state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. To disable fast snapshot restores, use DisableFastSnapshotRestores.
Enables the specified attachment to propagate routes to the specified propagation route table.
", "EnableVgwRoutePropagation": "Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.
", "EnableVolumeIO": "Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.
", @@ -292,6 +295,7 @@ "ModifyInstanceCapacityReservationAttributes": "Modifies the Capacity Reservation settings for a stopped instance. Use this action to configure an instance to target a specific Capacity Reservation, run in any open
Capacity Reservation with matching attributes, or run On-Demand Instance capacity.
Modifies the credit option for CPU usage on a running or stopped T2 or T3 instance. The credit options are standard
and unlimited
.
For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.
", "ModifyInstanceEventStartTime": "Modifies the start time for a scheduled Amazon EC2 instance event.
", + "ModifyInstanceMetadataOptions": "Modify the instance metadata parameters on a running or stopped instance. When you modify the parameters on a stopped instance, they are applied when the instance is started. When you modify the parameters on a running instance, the API responds with a state of “pending”. After the parameter modifications are successfully applied to the instance, the state of the modifications changes from “pending” to “applied” in subsequent describe-instances API calls. For more information, see Instance Metadata and User Data.
", "ModifyInstancePlacement": "Modifies the placement attributes for a specified instance. You can do the following:
Modify the affinity between an instance and a Dedicated Host. When affinity is set to host
and the instance is not associated with a specific Dedicated Host, the next time the instance is launched, it is automatically associated with the host on which it lands. If the instance is restarted or rebooted, this relationship persists.
Change the Dedicated Host with which an instance is associated.
Change the instance tenancy of an instance from host
to dedicated
, or from dedicated
to host
.
Move an instance to or from a placement group.
At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request.
To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped
state.
Modifies a launch template. You can specify which version of the launch template to set as the default version. When launching an instance, the default version applies when a launch template version is not specified.
", "ModifyNetworkInterfaceAttribute": "Modifies the specified network interface attribute. You can specify only one attribute at a time. You can use this action to attach and detach security groups from an existing EC2 instance.
", @@ -734,7 +738,7 @@ } }, "AttachVolumeRequest": { - "base": "Contains the parameters for AttachVolume.
", + "base": null, "refs": { } }, @@ -889,6 +893,13 @@ "AvailabilityZone$State": "The state of the Availability Zone.
" } }, + "AvailabilityZoneStringList": { + "base": null, + "refs": { + "DisableFastSnapshotRestoresRequest$AvailabilityZones": "One or more Availability Zones. For example, us-east-2a
.
One or more Availability Zones. For example, us-east-2a
.
The capacity information for instances launched onto the Dedicated Host.
", "refs": { @@ -1039,7 +1050,7 @@ "CreateRouteTableRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action without actually making the request. Provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
", + "CreateSnapshotsRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.
", "DetachVolumeRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The updated status of encryption by default.
", + "DisableFastSnapshotRestoresRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns true
if the request succeeds; otherwise, it returns an error.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true
depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
In no case can you remove encryption from an encrypted volume.
Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.
", "EbsInstanceBlockDevice$DeleteOnTermination": "Indicates whether the volume is deleted on instance termination.
", "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "Indicates whether the volume is deleted on instance termination.
", - "EnableEbsEncryptionByDefaultRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The updated status of encryption by default.
", + "EnableFastSnapshotRestoresRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns true
if the request succeeds; otherwise, it returns an error.
Returns true
if the request succeeds; otherwise, it returns an error.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Is true
if the request succeeds, and an error otherwise.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns true
if the request succeeds; otherwise, returns an error.
Indicates whether the volume is encrypted.
", + "Volume$FastRestored": "Indicates whether the volume was created using fast snapshot restore.
", "VolumeAttachment$DeleteOnTermination": "Indicates whether the EBS volume is deleted on instance termination.
", "Vpc$IsDefault": "Indicates whether the VPC is the default VPC.
", "VpcClassicLink$ClassicLinkEnabled": "Indicates whether the VPC is enabled for ClassicLink.
", @@ -2079,12 +2095,12 @@ } }, "CopySnapshotRequest": { - "base": "Contains the parameters for CopySnapshot.
", + "base": null, "refs": { } }, "CopySnapshotResult": { - "base": "Contains the output of CopySnapshot.
", + "base": null, "refs": { } }, @@ -2386,7 +2402,7 @@ } }, "CreateSnapshotRequest": { - "base": "Contains the parameters for CreateSnapshot.
", + "base": null, "refs": { } }, @@ -2532,7 +2548,7 @@ } }, "CreateVolumeRequest": { - "base": "Contains the parameters for CreateVolume.
", + "base": null, "refs": { } }, @@ -3040,7 +3056,7 @@ } }, "DeleteSnapshotRequest": { - "base": "Contains the parameters for DeleteSnapshot.
", + "base": null, "refs": { } }, @@ -3140,7 +3156,7 @@ } }, "DeleteVolumeRequest": { - "base": "Contains the parameters for DeleteVolume.
", + "base": null, "refs": { } }, @@ -3497,6 +3513,34 @@ "refs": { } }, + "DescribeFastSnapshotRestoreSuccessItem": { + "base": "Describes fast snapshot restores for a snapshot.
", + "refs": { + "DescribeFastSnapshotRestoreSuccessSet$member": null + } + }, + "DescribeFastSnapshotRestoreSuccessSet": { + "base": null, + "refs": { + "DescribeFastSnapshotRestoresResult$FastSnapshotRestores": "Information about the state of fast snapshot restores.
" + } + }, + "DescribeFastSnapshotRestoresMaxResults": { + "base": null, + "refs": { + "DescribeFastSnapshotRestoresRequest$MaxResults": "The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
Describes the instances that could not be launched by the fleet.
", "refs": { @@ -4042,12 +4086,12 @@ } }, "DescribeSnapshotAttributeRequest": { - "base": "Contains the parameters for DescribeSnapshotAttribute.
", + "base": null, "refs": { } }, "DescribeSnapshotAttributeResult": { - "base": "Contains the output of DescribeSnapshotAttribute.
", + "base": null, "refs": { } }, @@ -4252,12 +4296,12 @@ } }, "DescribeVolumeAttributeRequest": { - "base": "Contains the parameters for DescribeVolumeAttribute.
", + "base": null, "refs": { } }, "DescribeVolumeAttributeResult": { - "base": "Contains the output of DescribeVolumeAttribute.
", + "base": null, "refs": { } }, @@ -4467,7 +4511,7 @@ } }, "DetachVolumeRequest": { - "base": "Contains the parameters for DetachVolume.
", + "base": null, "refs": { } }, @@ -4549,6 +4593,58 @@ "refs": { } }, + "DisableFastSnapshotRestoreErrorItem": { + "base": "Contains information about the errors that occurred when disabling fast snapshot restores.
", + "refs": { + "DisableFastSnapshotRestoreErrorSet$member": null + } + }, + "DisableFastSnapshotRestoreErrorSet": { + "base": null, + "refs": { + "DisableFastSnapshotRestoresResult$Unsuccessful": "Information about the snapshots for which fast snapshot restores could not be disabled.
" + } + }, + "DisableFastSnapshotRestoreStateError": { + "base": "Describes an error that occurred when disabling fast snapshot restores.
", + "refs": { + "DisableFastSnapshotRestoreStateErrorItem$Error": "The error.
" + } + }, + "DisableFastSnapshotRestoreStateErrorItem": { + "base": "Contains information about an error that occurred when disabling fast snapshot restores.
", + "refs": { + "DisableFastSnapshotRestoreStateErrorSet$member": null + } + }, + "DisableFastSnapshotRestoreStateErrorSet": { + "base": null, + "refs": { + "DisableFastSnapshotRestoreErrorItem$FastSnapshotRestoreStateErrors": "The errors.
" + } + }, + "DisableFastSnapshotRestoreSuccessItem": { + "base": "Describes fast snapshot restores that were successfully disabled.
", + "refs": { + "DisableFastSnapshotRestoreSuccessSet$member": null + } + }, + "DisableFastSnapshotRestoreSuccessSet": { + "base": null, + "refs": { + "DisableFastSnapshotRestoresResult$Successful": "Information about the snapshots for which fast snapshot restores were successfully disabled.
" + } + }, + "DisableFastSnapshotRestoresRequest": { + "base": null, + "refs": { + } + }, + "DisableFastSnapshotRestoresResult": { + "base": null, + "refs": { + } + }, "DisableTransitGatewayRouteTablePropagationRequest": { "base": null, "refs": { @@ -4890,7 +4986,7 @@ "ElasticInferenceAcceleratorAssociationList": { "base": null, "refs": { - "Instance$ElasticInferenceAcceleratorAssociations": "The elastic inference accelerator associated with the instance.
" + "Instance$ElasticInferenceAcceleratorAssociations": "The elastic inference accelerator associated with the instance.
" } }, "ElasticInferenceAccelerators": { @@ -4909,6 +5005,58 @@ "refs": { } }, + "EnableFastSnapshotRestoreErrorItem": { + "base": "Contains information about the errors that occurred when enabling fast snapshot restores.
", + "refs": { + "EnableFastSnapshotRestoreErrorSet$member": null + } + }, + "EnableFastSnapshotRestoreErrorSet": { + "base": null, + "refs": { + "EnableFastSnapshotRestoresResult$Unsuccessful": "Information about the snapshots for which fast snapshot restores could not be enabled.
" + } + }, + "EnableFastSnapshotRestoreStateError": { + "base": "Describes an error that occurred when enabling fast snapshot restores.
", + "refs": { + "EnableFastSnapshotRestoreStateErrorItem$Error": "The error.
" + } + }, + "EnableFastSnapshotRestoreStateErrorItem": { + "base": "Contains information about an error that occurred when enabling fast snapshot restores.
", + "refs": { + "EnableFastSnapshotRestoreStateErrorSet$member": null + } + }, + "EnableFastSnapshotRestoreStateErrorSet": { + "base": null, + "refs": { + "EnableFastSnapshotRestoreErrorItem$FastSnapshotRestoreStateErrors": "The errors.
" + } + }, + "EnableFastSnapshotRestoreSuccessItem": { + "base": "Describes fast snapshot restores that were successfully enabled.
", + "refs": { + "EnableFastSnapshotRestoreSuccessSet$member": null + } + }, + "EnableFastSnapshotRestoreSuccessSet": { + "base": null, + "refs": { + "EnableFastSnapshotRestoresResult$Successful": "Information about the snapshots for which fast snapshot restores were successfully enabled.
" + } + }, + "EnableFastSnapshotRestoresRequest": { + "base": null, + "refs": { + } + }, + "EnableFastSnapshotRestoresResult": { + "base": null, + "refs": { + } + }, "EnableTransitGatewayRouteTablePropagationRequest": { "base": null, "refs": { @@ -4925,7 +5073,7 @@ } }, "EnableVolumeIORequest": { - "base": "Contains the parameters for EnableVolumeIO.
", + "base": null, "refs": { } }, @@ -5129,6 +5277,14 @@ "DeleteQueuedReservedInstancesResult$FailedQueuedPurchaseDeletions": "Information about the queued purchases that could not be deleted.
" } }, + "FastSnapshotRestoreStateCode": { + "base": null, + "refs": { + "DescribeFastSnapshotRestoreSuccessItem$State": "The state of fast snapshot restores.
", + "DisableFastSnapshotRestoreSuccessItem$State": "The state of fast snapshot restores for the snapshot.
", + "EnableFastSnapshotRestoreSuccessItem$State": "The state of fast snapshot restores.
" + } + }, "Filter": { "base": "A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
One or more filters.
dhcp-options-id
- The ID of a DHCP options set.
key
- The key for one of the options (for example, domain-name
).
value
- The value for one of the options.
owner-id
- The ID of the AWS account that owns the DHCP options set.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
The filters.
availability-zone
- The Availability Zone in which the Elastic Graphics accelerator resides.
elastic-gpu-health
- The status of the Elastic Graphics accelerator (OK
| IMPAIRED
).
elastic-gpu-state
- The state of the Elastic Graphics accelerator (ATTACHED
).
elastic-gpu-type
- The type of Elastic Graphics accelerator; for example, eg1.medium
.
instance-id
- The ID of the instance to which the Elastic Graphics accelerator is associated.
Filter tasks using the task-state
filter and one of the following values: active
, completed
, deleting
, or deleted
.
The filters. The possible values are:
availability-zone
: The Availability Zone of the snapshot.
owner-id
: The ID of the AWS account that owns the snapshot.
snapshot-id
: The ID of the snapshot.
state
: The state of fast snapshot restores for the snapshot (enabling
| optimizing
| enabled
| disabling
| disabled
).
The filters.
instance-type
- The instance type.
The filters.
activity-status
- The progress of the EC2 Fleet ( error
| pending-fulfillment
| pending-termination
| fulfilled
).
excess-capacity-termination-policy
- Indicates whether to terminate running instances if the target capacity is decreased below the current EC2 Fleet size (true
| false
).
fleet-state
- The state of the EC2 Fleet (submitted
| active
| deleted
| failed
| deleted-running
| deleted-terminating
| modifying
).
replace-unhealthy-instances
- Indicates whether EC2 Fleet should replace unhealthy instances (true
| false
).
type
- The type of request (instant
| request
| maintain
).
One or more filters.
deliver-log-status
- The status of the logs delivery (SUCCESS
| FAILED
).
log-destination-type
- The type of destination to which the flow log publishes data. Possible destination types include cloud-watch-logs
and S3
.
flow-log-id
- The ID of the flow log.
log-group-name
- The name of the log group.
resource-id
- The ID of the VPC, subnet, or network interface.
traffic-type
- The type of traffic (ACCEPT
| REJECT
| ALL
).
The filters.
", "DescribeInstanceCreditSpecificationsRequest$Filters": "The filters.
instance-id
- The ID of the instance.
The filters.
availability-zone
- The Availability Zone of the instance.
event.code
- The code for the scheduled event (instance-reboot
| system-reboot
| system-maintenance
| instance-retirement
| instance-stop
).
event.description
- A description of the event.
event.instance-event-id
- The ID of the event whose date and time you are modifying.
event.not-after
- The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z
).
event.not-before
- The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z
).
event.not-before-deadline
- The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z
).
instance-state-code
- The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
instance-state-name
- The state of the instance (pending
| running
| shutting-down
| terminated
| stopping
| stopped
).
instance-status.reachability
- Filters on instance status where the name is reachability
(passed
| failed
| initializing
| insufficient-data
).
instance-status.status
- The status of the instance (ok
| impaired
| initializing
| insufficient-data
| not-applicable
).
system-status.reachability
- Filters on system status where the name is reachability
(passed
| failed
| initializing
| insufficient-data
).
system-status.status
- The system status of the instance (ok
| impaired
| initializing
| insufficient-data
| not-applicable
).
The filters.
affinity
- The affinity setting for an instance running on a Dedicated Host (default
| host
).
architecture
- The instance architecture (i386
| x86_64
| arm64
).
availability-zone
- The Availability Zone of the instance.
block-device-mapping.attach-time
- The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z
.
block-device-mapping.delete-on-termination
- A Boolean that indicates whether the EBS volume is deleted on instance termination.
block-device-mapping.device-name
- The device name specified in the block device mapping (for example, /dev/sdh
or xvdh
).
block-device-mapping.status
- The status for the EBS volume (attaching
| attached
| detaching
| detached
).
block-device-mapping.volume-id
- The volume ID of the EBS volume.
client-token
- The idempotency token you provided when you launched the instance.
dns-name
- The public DNS name of the instance.
group-id
- The ID of the security group for the instance. EC2-Classic only.
group-name
- The name of the security group for the instance. EC2-Classic only.
hibernation-options.configured
- A Boolean that indicates whether the instance is enabled for hibernation. A value of true
means that the instance is enabled for hibernation.
host-id
- The ID of the Dedicated Host on which the instance is running, if applicable.
hypervisor
- The hypervisor type of the instance (ovm
| xen
).
iam-instance-profile.arn
- The instance profile associated with the instance. Specified as an ARN.
image-id
- The ID of the image used to launch the instance.
instance-id
- The ID of the instance.
instance-lifecycle
- Indicates whether this is a Spot Instance or a Scheduled Instance (spot
| scheduled
).
instance-state-code
- The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
instance-state-name
- The state of the instance (pending
| running
| shutting-down
| terminated
| stopping
| stopped
).
instance-type
- The type of instance (for example, t2.micro
).
instance.group-id
- The ID of the security group for the instance.
instance.group-name
- The name of the security group for the instance.
ip-address
- The public IPv4 address of the instance.
kernel-id
- The kernel ID.
key-name
- The name of the key pair used when the instance was launched.
launch-index
- When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
launch-time
- The time when the instance was launched.
monitoring-state
- Indicates whether detailed monitoring is enabled (disabled
| enabled
).
network-interface.addresses.private-ip-address
- The private IPv4 address associated with the network interface.
network-interface.addresses.primary
- Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
network-interface.addresses.association.public-ip
- The ID of the association of an Elastic IP address (IPv4) with a network interface.
network-interface.addresses.association.ip-owner-id
- The owner ID of the private IPv4 address associated with the network interface.
network-interface.association.public-ip
- The address of the Elastic IP address (IPv4) bound to the network interface.
network-interface.association.ip-owner-id
- The owner of the Elastic IP address (IPv4) associated with the network interface.
network-interface.association.allocation-id
- The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
network-interface.association.association-id
- The association ID returned when the network interface was associated with an IPv4 address.
network-interface.attachment.attachment-id
- The ID of the interface attachment.
network-interface.attachment.instance-id
- The ID of the instance to which the network interface is attached.
network-interface.attachment.instance-owner-id
- The owner ID of the instance to which the network interface is attached.
network-interface.attachment.device-index
- The device index to which the network interface is attached.
network-interface.attachment.status
- The status of the attachment (attaching
| attached
| detaching
| detached
).
network-interface.attachment.attach-time
- The time that the network interface was attached to an instance.
network-interface.attachment.delete-on-termination
- Specifies whether the attachment is deleted when an instance is terminated.
network-interface.availability-zone
- The Availability Zone for the network interface.
network-interface.description
- The description of the network interface.
network-interface.group-id
- The ID of a security group associated with the network interface.
network-interface.group-name
- The name of a security group associated with the network interface.
network-interface.ipv6-addresses.ipv6-address
- The IPv6 address associated with the network interface.
network-interface.mac-address
- The MAC address of the network interface.
network-interface.network-interface-id
- The ID of the network interface.
network-interface.owner-id
- The ID of the owner of the network interface.
network-interface.private-dns-name
- The private DNS name of the network interface.
network-interface.requester-id
- The requester ID for the network interface.
network-interface.requester-managed
- Indicates whether the network interface is being managed by AWS.
network-interface.status
- The status of the network interface (available
 | in-use
).
network-interface.source-dest-check
- Whether the network interface performs source/destination checking. A value of true
means that checking is enabled, and false
means that checking is disabled. The value must be false
for the network interface to perform network address translation (NAT) in your VPC.
network-interface.subnet-id
- The ID of the subnet for the network interface.
network-interface.vpc-id
- The ID of the VPC for the network interface.
owner-id
- The AWS account ID of the instance owner.
placement-group-name
- The name of the placement group for the instance.
placement-partition-number
- The partition in which the instance is located.
platform
- The platform. To list only Windows instances, use windows
.
private-dns-name
- The private IPv4 DNS name of the instance.
private-ip-address
- The private IPv4 address of the instance.
product-code
- The product code associated with the AMI used to launch the instance.
product-code.type
- The type of product code (devpay
| marketplace
).
ramdisk-id
- The RAM disk ID.
reason
- The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.
requester-id
- The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
reservation-id
- The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
root-device-name
- The device name of the root device volume (for example, /dev/sda1
).
root-device-type
- The type of the root device volume (ebs
| instance-store
).
source-dest-check
- Indicates whether the instance performs source/destination checking. A value of true
means that checking is enabled, and false
means that checking is disabled. The value must be false
for the instance to perform network address translation (NAT) in your VPC.
spot-instance-request-id
- The ID of the Spot Instance request.
state-reason-code
- The reason code for the state change.
state-reason-message
- A message that describes the state change.
subnet-id
- The ID of the subnet for the instance.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
tenancy
- The tenancy of an instance (dedicated
| default
| host
).
virtualization-type
- The virtualization type of the instance (paravirtual
| hvm
).
vpc-id
- The ID of the VPC that the instance is running in.
The filters.
affinity
- The affinity setting for an instance running on a Dedicated Host (default
| host
).
architecture
- The instance architecture (i386
| x86_64
| arm64
).
availability-zone
- The Availability Zone of the instance.
block-device-mapping.attach-time
- The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z
.
block-device-mapping.delete-on-termination
- A Boolean that indicates whether the EBS volume is deleted on instance termination.
block-device-mapping.device-name
- The device name specified in the block device mapping (for example, /dev/sdh
or xvdh
).
block-device-mapping.status
- The status for the EBS volume (attaching
| attached
| detaching
| detached
).
block-device-mapping.volume-id
- The volume ID of the EBS volume.
client-token
- The idempotency token you provided when you launched the instance.
dns-name
- The public DNS name of the instance.
group-id
- The ID of the security group for the instance. EC2-Classic only.
group-name
- The name of the security group for the instance. EC2-Classic only.
hibernation-options.configured
- A Boolean that indicates whether the instance is enabled for hibernation. A value of true
means that the instance is enabled for hibernation.
host-id
- The ID of the Dedicated Host on which the instance is running, if applicable.
hypervisor
- The hypervisor type of the instance (ovm
| xen
).
iam-instance-profile.arn
- The instance profile associated with the instance. Specified as an ARN.
image-id
- The ID of the image used to launch the instance.
instance-id
- The ID of the instance.
instance-lifecycle
- Indicates whether this is a Spot Instance or a Scheduled Instance (spot
| scheduled
).
instance-state-code
- The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
instance-state-name
- The state of the instance (pending
| running
| shutting-down
| terminated
| stopping
| stopped
).
instance-type
- The type of instance (for example, t2.micro
).
instance.group-id
- The ID of the security group for the instance.
instance.group-name
- The name of the security group for the instance.
ip-address
- The public IPv4 address of the instance.
kernel-id
- The kernel ID.
key-name
- The name of the key pair used when the instance was launched.
launch-index
- When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
launch-time
- The time when the instance was launched.
metadata-http-tokens
- The metadata request authorization state (optional
| required
)
metadata-http-put-response-hop-limit
- The http metadata request put response hop limit (integer, possible values 1
to 64
)
metadata-http-endpoint
- Enable or disable metadata access on http endpoint (enabled
| disabled
)
monitoring-state
- Indicates whether detailed monitoring is enabled (disabled
| enabled
).
network-interface.addresses.private-ip-address
- The private IPv4 address associated with the network interface.
network-interface.addresses.primary
- Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
network-interface.addresses.association.public-ip
- The ID of the association of an Elastic IP address (IPv4) with a network interface.
network-interface.addresses.association.ip-owner-id
- The owner ID of the private IPv4 address associated with the network interface.
network-interface.association.public-ip
- The address of the Elastic IP address (IPv4) bound to the network interface.
network-interface.association.ip-owner-id
- The owner of the Elastic IP address (IPv4) associated with the network interface.
network-interface.association.allocation-id
- The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
network-interface.association.association-id
- The association ID returned when the network interface was associated with an IPv4 address.
network-interface.attachment.attachment-id
- The ID of the interface attachment.
network-interface.attachment.instance-id
- The ID of the instance to which the network interface is attached.
network-interface.attachment.instance-owner-id
- The owner ID of the instance to which the network interface is attached.
network-interface.attachment.device-index
- The device index to which the network interface is attached.
network-interface.attachment.status
- The status of the attachment (attaching
| attached
| detaching
| detached
).
network-interface.attachment.attach-time
- The time that the network interface was attached to an instance.
network-interface.attachment.delete-on-termination
- Specifies whether the attachment is deleted when an instance is terminated.
network-interface.availability-zone
- The Availability Zone for the network interface.
network-interface.description
- The description of the network interface.
network-interface.group-id
- The ID of a security group associated with the network interface.
network-interface.group-name
- The name of a security group associated with the network interface.
network-interface.ipv6-addresses.ipv6-address
- The IPv6 address associated with the network interface.
network-interface.mac-address
- The MAC address of the network interface.
network-interface.network-interface-id
- The ID of the network interface.
network-interface.owner-id
- The ID of the owner of the network interface.
network-interface.private-dns-name
- The private DNS name of the network interface.
network-interface.requester-id
- The requester ID for the network interface.
network-interface.requester-managed
- Indicates whether the network interface is being managed by AWS.
network-interface.status
- The status of the network interface (available
 | in-use
).
network-interface.source-dest-check
- Whether the network interface performs source/destination checking. A value of true
means that checking is enabled, and false
means that checking is disabled. The value must be false
for the network interface to perform network address translation (NAT) in your VPC.
network-interface.subnet-id
- The ID of the subnet for the network interface.
network-interface.vpc-id
- The ID of the VPC for the network interface.
owner-id
- The AWS account ID of the instance owner.
placement-group-name
- The name of the placement group for the instance.
placement-partition-number
- The partition in which the instance is located.
platform
- The platform. To list only Windows instances, use windows
.
private-dns-name
- The private IPv4 DNS name of the instance.
private-ip-address
- The private IPv4 address of the instance.
product-code
- The product code associated with the AMI used to launch the instance.
product-code.type
- The type of product code (devpay
| marketplace
).
ramdisk-id
- The RAM disk ID.
reason
- The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.
requester-id
- The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
reservation-id
- The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
root-device-name
- The device name of the root device volume (for example, /dev/sda1
).
root-device-type
- The type of the root device volume (ebs
| instance-store
).
source-dest-check
- Indicates whether the instance performs source/destination checking. A value of true
means that checking is enabled, and false
means that checking is disabled. The value must be false
for the instance to perform network address translation (NAT) in your VPC.
spot-instance-request-id
- The ID of the Spot Instance request.
state-reason-code
- The reason code for the state change.
state-reason-message
- A message that describes the state change.
subnet-id
- The ID of the subnet for the instance.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
tenancy
- The tenancy of an instance (dedicated
| default
| host
).
virtualization-type
- The virtualization type of the instance (paravirtual
| hvm
).
vpc-id
- The ID of the VPC that the instance is running in.
One or more filters.
attachment.state
- The current state of the attachment between the gateway and the VPC (available
). Present only if a VPC is attached.
attachment.vpc-id
- The ID of an attached VPC.
internet-gateway-id
- The ID of the Internet gateway.
owner-id
- The ID of the AWS account that owns the internet gateway.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
The filters.
fingerprint
- The fingerprint of the key pair.
key-name
- The name of the key pair.
One or more filters.
create-time
- The time the launch template version was created.
ebs-optimized
- A boolean that indicates whether the instance is optimized for Amazon EBS I/O.
iam-instance-profile
- The ARN of the IAM instance profile.
image-id
- The ID of the AMI.
instance-type
- The instance type.
is-default-version
- A boolean that indicates whether the launch template version is the default version.
kernel-id
- The kernel ID.
ram-disk-id
- The RAM disk ID.
One or more filters.
service-name
: The name of the service.
vpc-id
: The ID of the VPC in which the endpoint resides.
vpc-endpoint-id
: The ID of the endpoint.
vpc-endpoint-state
- The state of the endpoint (pendingAcceptance
| pending
| available
| deleting
| deleted
| rejected
| failed
).
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
One or more filters.
accepter-vpc-info.cidr-block
- The IPv4 CIDR block of the accepter VPC.
accepter-vpc-info.owner-id
- The AWS account ID of the owner of the accepter VPC.
accepter-vpc-info.vpc-id
- The ID of the accepter VPC.
expiration-time
- The expiration date and time for the VPC peering connection.
requester-vpc-info.cidr-block
- The IPv4 CIDR block of the requester's VPC.
requester-vpc-info.owner-id
- The AWS account ID of the owner of the requester VPC.
requester-vpc-info.vpc-id
- The ID of the requester VPC.
status-code
- The status of the VPC peering connection (pending-acceptance
| failed
| expired
| provisioning
| active
| deleting
| deleted
| rejected
).
status-message
- A message that provides more information about the status of the VPC peering connection, if applicable.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
vpc-peering-connection-id
- The ID of the VPC peering connection.
One or more filters.
cidr
- The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28
).
cidr-block-association.cidr-block
- An IPv4 CIDR block associated with the VPC.
cidr-block-association.association-id
- The association ID for an IPv4 CIDR block associated with the VPC.
cidr-block-association.state
- The state of an IPv4 CIDR block associated with the VPC.
dhcp-options-id
- The ID of a set of DHCP options.
ipv6-cidr-block-association.ipv6-cidr-block
- An IPv6 CIDR block associated with the VPC.
ipv6-cidr-block-association.association-id
- The association ID for an IPv6 CIDR block associated with the VPC.
ipv6-cidr-block-association.state
- The state of an IPv6 CIDR block associated with the VPC.
isDefault
- Indicates whether the VPC is the default VPC.
owner-id
- The ID of the AWS account that owns the VPC.
state
- The state of the VPC (pending
| available
).
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
vpc-id
- The ID of the VPC.
One or more filters.
customer-gateway-configuration
- The configuration information for the customer gateway.
customer-gateway-id
- The ID of a customer gateway associated with the VPN connection.
state
- The state of the VPN connection (pending
| available
| deleting
| deleted
).
option.static-routes-only
- Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).
route.destination-cidr-block
- The destination CIDR block. This corresponds to the subnet used in a customer data center.
bgp-asn
- The BGP Autonomous System Number (ASN) associated with a BGP device.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
type
- The type of VPN connection. Currently the only supported type is ipsec.1
.
vpn-connection-id
- The ID of the VPN connection.
vpn-gateway-id
- The ID of a virtual private gateway associated with the VPN connection.
One or more filters.
customer-gateway-configuration
- The configuration information for the customer gateway.
customer-gateway-id
- The ID of a customer gateway associated with the VPN connection.
state
- The state of the VPN connection (pending
| available
| deleting
| deleted
).
option.static-routes-only
- Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).
route.destination-cidr-block
- The destination CIDR block. This corresponds to the subnet used in a customer data center.
bgp-asn
- The BGP Autonomous System Number (ASN) associated with a BGP device.
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
type
- The type of VPN connection. Currently the only supported type is ipsec.1
.
vpn-connection-id
- The ID of the VPN connection.
vpn-gateway-id
- The ID of a virtual private gateway associated with the VPN connection.
transit-gateway-id
- The ID of a transit gateway associated with the VPN connection.
One or more filters.
amazon-side-asn
- The Autonomous System Number (ASN) for the Amazon side of the gateway.
attachment.state
- The current state of the attachment between the gateway and the VPC (attaching
| attached
| detaching
| detached
).
attachment.vpc-id
- The ID of an attached VPC.
availability-zone
- The Availability Zone for the virtual private gateway (if applicable).
state
- The state of the virtual private gateway (pending
| available
| deleting
| deleted
).
tag
:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
type
- The type of virtual private gateway. Currently the only supported type is ipsec.1
.
vpn-gateway-id
- The ID of the virtual private gateway.
One or more filters. The possible values are:
attachment.transit-gateway-attachment-id
- The id of the transit gateway attachment.
attachment.resource-id
- The resource id of the transit gateway attachment.
route-search.exact-match
- The exact match of the specified filter.
route-search.longest-prefix-match
- The longest prefix that matches the route.
route-search.subnet-of-match
- The routes with a subnet that match the specified CIDR filter.
route-search.supernet-of-match
- The routes with a CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 routes in your route table and you specify supernet-of-match as 10.0.1.0/30, then the result returns 10.0.1.0/29.
state
- The state of the attachment (available
| deleted
| deleting
| failed
| modifying
| pendingAcceptance
| pending
| rollingBack
| rejected
| rejecting
).
transit-gateway-route-destination-cidr-block
- The CIDR range.
type
- The type of route (active
| blackhole
).
One or more filters. The possible values are:
transit-gateway-route-table-id
- The ID of the transit gateway route table.
The tenancy for the instance.
" } }, + "HttpTokensState": { + "base": null, + "refs": { + "InstanceMetadataOptionsRequest$HttpTokens": "The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional
.
If the state is optional
, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.
If the state is required
, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.
The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional
.
If the state is optional
, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.
If the state is required
, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.
The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional
.
If the state is optional
, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.
If the state is required
, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.
Indicates the type of instance launches that the Capacity Reservation accepts. The options include:
open
- The Capacity Reservation automatically matches all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes run in the Capacity Reservation automatically without specifying any additional parameters.
targeted
- The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity.
Default: open
This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled
.
If you specify a value of disabled
, you will not be able to access your instance metadata.
This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled
.
If you specify a value of disabled
, you will not be able to access your instance metadata.
This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the existing state is maintained.
If you specify a value of disabled
, you will not be able to access your instance metadata.
The metadata options for the instance.
", + "refs": { + "RunInstancesRequest$MetadataOptions": "The metadata options for the instance. For more information, see Instance Metadata and User Data.
" + } + }, + "InstanceMetadataOptionsResponse": { + "base": "The metadata options for the instance.
", + "refs": { + "Instance$MetadataOptions": "The metadata options for the instance.
", + "ModifyInstanceMetadataOptionsResult$InstanceMetadataOptions": "The metadata options for the instance.
" + } + }, + "InstanceMetadataOptionsState": { + "base": null, + "refs": { + "InstanceMetadataOptionsResponse$State": "The state of the metadata option changes.
pending
- The metadata options are being updated and the instance is not ready to process metadata traffic with the new selection.
applied
- The metadata options have been successfully applied on the instance.
Describes the monitoring of an instance.
", "refs": { @@ -6527,6 +6719,8 @@ "InstanceCapacity$AvailableCapacity": "The number of instances that can still be launched onto the Dedicated Host.
", "InstanceCapacity$TotalCapacity": "The total number of instances that can be launched onto the Dedicated Host.
", "InstanceCount$InstanceCount": "The number of listed Reserved Instances in the state specified by the state
.
The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.
Default: 1
Possible values: Integers from 1 to 64
", + "InstanceMetadataOptionsResponse$HttpPutResponseHopLimit": "The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.
Default: 1
Possible values: Integers from 1 to 64
", "InstanceNetworkInterfaceAttachment$DeviceIndex": "The index of the device on the instance for the network interface attachment.
", "InstanceNetworkInterfaceSpecification$DeviceIndex": "The position of the network interface in the attachment order. A primary network interface has a device index of 0.
If you specify a network interface when launching an instance, you must specify the device index.
", "InstanceNetworkInterfaceSpecification$Ipv6AddressCount": "A number of IPv6 addresses to assign to the network interface. Amazon EC2 chooses the IPv6 addresses from the range of the subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.
", @@ -6552,6 +6746,7 @@ "LaunchTemplateSpotMarketOptions$BlockDurationMinutes": "The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
", "LaunchTemplateSpotMarketOptionsRequest$BlockDurationMinutes": "The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
", "ModifyCapacityReservationRequest$InstanceCount": "The number of instances for which to reserve capacity.
", + "ModifyInstanceMetadataOptionsRequest$HttpPutResponseHopLimit": "The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. If no parameter is specified, the existing state is maintained.
Possible values: Integers from 1 to 64
", "ModifyInstancePlacementRequest$PartitionNumber": "Reserved for future use.
", "ModifySpotFleetRequestRequest$TargetCapacity": "The size of the fleet.
", "ModifySpotFleetRequestRequest$OnDemandTargetCapacity": "The number of On-Demand Instances in the fleet.
", @@ -6834,7 +7029,6 @@ "base": null, "refs": { "CopyImageRequest$KmsKeyId": "An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". For example:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
Alias name: alias/ExampleAlias
Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.
The specified CMK must exist in the Region that the snapshot is being copied to.
", - "CopySnapshotRequest$KmsKeyId": "The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId
is specified, the encrypted state must be true
.
You can specify the CMK using any of the following:
Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.
Key alias. For example, alias/ExampleAlias.
Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
", "CreateVolumeRequest$KmsKeyId": "The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId
is specified, the encrypted state must be true
.
You can specify the CMK using any of the following:
Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.
Key alias. For example, alias/ExampleAlias.
Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
", "ImportImageRequest$KmsKeyId": "An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted AMI. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
Key ID
Key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.
The specified CMK must exist in the Region that the AMI is being copied to.
", "ImportSnapshotRequest$KmsKeyId": "An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
Key ID
Key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.
The specified CMK must exist in the Region that the snapshot is being copied to.
", @@ -7323,6 +7517,21 @@ "MillisecondDateTime": { "base": null, "refs": { + "DescribeFastSnapshotRestoreSuccessItem$EnablingTime": "The time at which fast snapshot restores entered the enabling
state.
The time at which fast snapshot restores entered the optimizing
state.
The time at which fast snapshot restores entered the enabled
state.
The time at which fast snapshot restores entered the disabling
state.
The time at which fast snapshot restores entered the disabled
state.
The time at which fast snapshot restores entered the enabling
state.
The time at which fast snapshot restores entered the optimizing
state.
The time at which fast snapshot restores entered the enabled
state.
The time at which fast snapshot restores entered the disabling
state.
The time at which fast snapshot restores entered the disabled
state.
The time at which fast snapshot restores entered the enabling
state.
The time at which fast snapshot restores entered the optimizing
state.
The time at which fast snapshot restores entered the enabled
state.
The time at which fast snapshot restores entered the disabling
state.
The time at which fast snapshot restores entered the disabled
state.
The time this snapshot was started. This is the same for all snapshots initiated by the same request.
", "VpcEndpoint$CreationTimestamp": "The date and time the VPC endpoint was created.
", "VpcEndpointConnection$CreationTimestamp": "The date and time the VPC endpoint was created.
" @@ -7438,6 +7647,16 @@ "refs": { } }, + "ModifyInstanceMetadataOptionsRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstanceMetadataOptionsResult": { + "base": null, + "refs": { + } + }, "ModifyInstancePlacementRequest": { "base": null, "refs": { @@ -7474,7 +7693,7 @@ } }, "ModifySnapshotAttributeRequest": { - "base": "Contains the parameters for ModifySnapshotAttribute.
", + "base": null, "refs": { } }, @@ -7540,7 +7759,7 @@ } }, "ModifyVolumeAttributeRequest": { - "base": "Contains the parameters for ModifyVolumeAttribute.
", + "base": null, "refs": { } }, @@ -7964,6 +8183,8 @@ "DescribeClientVpnTargetNetworksResult$NextToken": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
A token that indicates the next page of results.
", "DescribeExportImageTasksResult$NextToken": "The token to use to get the next page of results. This value is null
when there are no more results to return.
The token for the next page of results.
", + "DescribeFastSnapshotRestoresResult$NextToken": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The token to retrieve the next page of results.
", "DescribeFpgaImagesResult$NextToken": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The token to request the next page of results.
", @@ -9030,7 +9251,7 @@ } }, "ResetSnapshotAttributeRequest": { - "base": "Contains the parameters for ResetSnapshotAttribute.
", + "base": null, "refs": { } }, @@ -9558,7 +9779,9 @@ "SnapshotIdStringList": { "base": null, "refs": { - "DescribeSnapshotsRequest$SnapshotIds": "The snapshot IDs.
Default: Describes the snapshots for which you have create volume permissions.
" + "DescribeSnapshotsRequest$SnapshotIds": "The snapshot IDs.
Default: Describes the snapshots for which you have create volume permissions.
", + "DisableFastSnapshotRestoresRequest$SourceSnapshotIds": "The IDs of one or more snapshots. For example, snap-1234567890abcdef0
.
The IDs of one or more snapshots. For example, snap-1234567890abcdef0
. You can specify a snapshot that was shared with you from another AWS account.
The name of the Availability Zone.
", "AvailabilityZone$ZoneId": "The ID of the Availability Zone.
", "AvailabilityZoneMessage$Message": "The message about the Availability Zone.
", + "AvailabilityZoneStringList$member": null, "BillingProductList$member": null, "BlockDeviceMapping$DeviceName": "The device name (for example, /dev/sdh
or xvdh
).
The virtual device name (ephemeral
N). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0
and ephemeral1
. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.
NVMe instance store volumes are automatically enumerated and assigned a device name. Including them in your block device mapping has no effect.
Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.
", @@ -10013,6 +10237,7 @@ "CopyImageResult$ImageId": "The ID of the new AMI.
", "CopySnapshotRequest$Description": "A description for the EBS snapshot.
", "CopySnapshotRequest$DestinationRegion": "The destination Region to use in the PresignedUrl
parameter of a snapshot copy operation. This parameter is only valid for specifying the destination Region in a PresignedUrl
parameter, where it is required.
The snapshot copy is sent to the regional endpoint that you sent the HTTP request to (for example, ec2.us-east-1.amazonaws.com
). With the AWS CLI, this is specified using the --region
parameter or the default Region in your AWS configuration file.
The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId
is specified, the encrypted state must be true
.
You can specify the CMK using any of the following:
Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.
Key alias. For example, alias/ExampleAlias.
Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
", "CopySnapshotRequest$PresignedUrl": "When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query Requests.
The PresignedUrl
should use the snapshot source endpoint, the CopySnapshot
action, and include the SourceRegion
, SourceSnapshotId
, and DestinationRegion
parameters. The PresignedUrl
must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl
will cause the copy operation to fail asynchronously, and the snapshot will move to an error
state.
The ID of the Region that contains the snapshot to be copied.
", "CopySnapshotRequest$SourceSnapshotId": "The ID of the EBS snapshot to copy.
", @@ -10033,6 +10258,7 @@ "CreateClientVpnRouteRequest$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
", "CreateCustomerGatewayRequest$PublicIp": "The Internet-routable IP address for the customer gateway's outside interface. The address must be static.
", "CreateCustomerGatewayRequest$CertificateArn": "The Amazon Resource Name (ARN) for the customer gateway certificate.
", + "CreateCustomerGatewayRequest$DeviceName": "A name for the customer gateway device.
Length Constraints: Up to 255 characters.
", "CreateDefaultSubnetRequest$AvailabilityZone": "The Availability Zone in which to create the default subnet.
", "CreateEgressOnlyInternetGatewayRequest$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
", "CreateEgressOnlyInternetGatewayResult$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", @@ -10125,6 +10351,7 @@ "CustomerGateway$CertificateArn": "The Amazon Resource Name (ARN) for the customer gateway certificate.
", "CustomerGateway$State": "The current state of the customer gateway (pending | available | deleting | deleted
).
The type of VPN connection the customer gateway supports (ipsec.1
).
The name of the customer gateway device.
", "CustomerGatewayIdStringList$member": null, "DeleteClientVpnRouteRequest$TargetVpcSubnetId": "The ID of the target subnet used by the route.
", "DeleteClientVpnRouteRequest$DestinationCidrBlock": "The IPv4 address range, in CIDR notation, of the route to be deleted.
", @@ -10166,6 +10393,11 @@ "DescribeEgressOnlyInternetGatewaysResult$NextToken": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The token to request the next page of results.
", "DescribeElasticGpusResult$NextToken": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The ID of the snapshot.
", + "DescribeFastSnapshotRestoreSuccessItem$AvailabilityZone": "The Availability Zone.
", + "DescribeFastSnapshotRestoreSuccessItem$StateTransitionReason": "The reason for the state transition. The possible values are as follows:
Client.UserInitiated
- The state successfully transitioned to enabling
or disabling
.
Client.UserInitiated - Lifecycle state transition
- The state successfully transitioned to optimizing
, enabled
, or disabled
.
The ID of the AWS account that owns the snapshot.
", + "DescribeFastSnapshotRestoreSuccessItem$OwnerAlias": "The alias of the snapshot owner.
", "DescribeFleetError$ErrorCode": "The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.
", "DescribeFleetError$ErrorMessage": "The error message that describes why the instance could not be launched. For more information about error messages, see ee Error Codes.
", "DescribeFleetHistoryRequest$NextToken": "The token for the next set of results.
", @@ -10310,6 +10542,15 @@ "DhcpOptionsIdStringList$member": null, "DirectoryServiceAuthentication$DirectoryId": "The ID of the Active Directory used for authentication.
", "DirectoryServiceAuthenticationRequest$DirectoryId": "The ID of the Active Directory to be used for authentication.
", + "DisableFastSnapshotRestoreErrorItem$SnapshotId": "The ID of the snapshot.
", + "DisableFastSnapshotRestoreStateError$Code": "The error code.
", + "DisableFastSnapshotRestoreStateError$Message": "The error message.
", + "DisableFastSnapshotRestoreStateErrorItem$AvailabilityZone": "The Availability Zone.
", + "DisableFastSnapshotRestoreSuccessItem$SnapshotId": "The ID of the snapshot.
", + "DisableFastSnapshotRestoreSuccessItem$AvailabilityZone": "The Availability Zone.
", + "DisableFastSnapshotRestoreSuccessItem$StateTransitionReason": "The reason for the state transition. The possible values are as follows:
Client.UserInitiated
- The state successfully transitioned to enabling
or disabling
.
Client.UserInitiated - Lifecycle state transition
- The state successfully transitioned to optimizing
, enabled
, or disabled
.
The ID of the AWS account that owns the snapshot.
", + "DisableFastSnapshotRestoreSuccessItem$OwnerAlias": "The alias of the snapshot owner.
", "DisassociateAddressRequest$AssociationId": "[EC2-VPC] The association ID. Required for EC2-VPC.
", "DisassociateAddressRequest$PublicIp": "[EC2-Classic] The Elastic IP address. Required for EC2-Classic.
", "DisassociateClientVpnTargetNetworkRequest$AssociationId": "The ID of the target network association.
", @@ -10343,6 +10584,15 @@ "ElasticInferenceAcceleratorAssociation$ElasticInferenceAcceleratorArn": "The Amazon Resource Name (ARN) of the elastic inference accelerator.
", "ElasticInferenceAcceleratorAssociation$ElasticInferenceAcceleratorAssociationId": "The ID of the association.
", "ElasticInferenceAcceleratorAssociation$ElasticInferenceAcceleratorAssociationState": "The state of the elastic inference accelerator.
", + "EnableFastSnapshotRestoreErrorItem$SnapshotId": "The ID of the snapshot.
", + "EnableFastSnapshotRestoreStateError$Code": "The error code.
", + "EnableFastSnapshotRestoreStateError$Message": "The error message.
", + "EnableFastSnapshotRestoreStateErrorItem$AvailabilityZone": "The Availability Zone.
", + "EnableFastSnapshotRestoreSuccessItem$SnapshotId": "The ID of the snapshot.
", + "EnableFastSnapshotRestoreSuccessItem$AvailabilityZone": "The Availability Zone.
", + "EnableFastSnapshotRestoreSuccessItem$StateTransitionReason": "The reason for the state transition. The possible values are as follows:
Client.UserInitiated
- The state successfully transitioned to enabling
or disabling
.
Client.UserInitiated - Lifecycle state transition
- The state successfully transitioned to optimizing
, enabled
, or disabled
.
The ID of the AWS account that owns the snapshot.
", + "EnableFastSnapshotRestoreSuccessItem$OwnerAlias": "The alias of the snapshot owner.
", "EventInformation$EventDescription": "The description of the event.
", "EventInformation$EventSubType": "The event.
The following are the error
events:
iamFleetRoleInvalid
- The EC2 Fleet or Spot Fleet did not have the required permissions either to launch or terminate an instance.
spotFleetRequestConfigurationInvalid
- The configuration is not valid. For more information, see the description of the event.
spotInstanceCountLimitExceeded
- You've reached the limit on the number of Spot Instances that you can launch.
The following are the fleetRequestChange
events:
active
- The EC2 Fleet or Spot Fleet request has been validated and Amazon EC2 is attempting to maintain the target number of running Spot Instances.
cancelled
- The EC2 Fleet or Spot Fleet request is canceled and has no running Spot Instances. The EC2 Fleet or Spot Fleet will be deleted two days after its instances were terminated.
cancelled_running
- The EC2 Fleet or Spot Fleet request is canceled and does not launch additional Spot Instances. Existing Spot Instances continue to run until they are interrupted or terminated.
cancelled_terminating
- The EC2 Fleet or Spot Fleet request is canceled and its Spot Instances are terminating.
expired
- The EC2 Fleet or Spot Fleet request has expired. A subsequent event indicates that the instances were terminated, if the request was created with TerminateInstancesWithExpiration
set.
modify_in_progress
- A request to modify the EC2 Fleet or Spot Fleet request was accepted and is in progress.
modify_successful
- The EC2 Fleet or Spot Fleet request was modified.
price_update
- The price for a launch configuration was adjusted because it was too high. This change is permanent.
submitted
- The EC2 Fleet or Spot Fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of Spot Instances.
The following are the instanceChange
events:
launched
- A request was fulfilled and a new instance was launched.
terminated
- An instance was terminated by the user.
The following are the Information
events:
launchSpecTemporarilyBlacklisted
- The configuration is not valid and several attempts to launch instances have failed. For more information, see the description of the event.
launchSpecUnusable
- The price in a launch specification is not valid because it is below the Spot price or the Spot price is above the On-Demand price.
fleetProgressHalted
- The price in every launch specification is not valid. A launch specification might become valid if the Spot price changes.
The ID of the instance. This information is available only for instanceChange
events.
A new value for the attribute. Use only with the kernel
, ramdisk
, userData
, disableApiTermination
, or instanceInitiatedShutdownBehavior
attribute.
A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.
", "ModifyInstanceEventStartTimeRequest$InstanceEventId": "The ID of the event whose date and time you are modifying.
", + "ModifyInstanceMetadataOptionsRequest$InstanceId": "The ID of the instance.
", + "ModifyInstanceMetadataOptionsResult$InstanceId": "The ID of the instance.
", "ModifyLaunchTemplateRequest$ClientToken": "Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.
Constraint: Maximum 128 ASCII characters.
", "ModifyLaunchTemplateRequest$DefaultVersion": "The version number of the launch template to set as the default version.
", "ModifyReservedInstancesRequest$ClientToken": "A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.
", @@ -11383,6 +11635,7 @@ "ClassicLinkInstance$Tags": "Any tags assigned to the instance.
", "ClientVpnEndpoint$Tags": "Any tags assigned to the Client VPN endpoint.
", "ConversionTask$Tags": "Any tags assigned to the task.
", + "CopySnapshotResult$Tags": "Any tags applied to the new snapshot.
", "CreateTagsRequest$Tags": "The tags. The value
parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.
Any tags assigned to the customer gateway.
", "DeleteTagsRequest$Tags": "The tags to delete. Specify a tag key and an optional tag value to delete specific tags. If you specify a tag key without a tag value, we delete any tag with this key regardless of its value. If you specify a tag key with an empty string as the tag value, we delete the tag only if its value is an empty string.
If you omit this parameter, we delete all user-defined tags for the specified resources. We do not delete AWS-generated tags (tags that have the aws:
prefix).
The tags to apply to the Dedicated Host during creation.
", + "CopySnapshotRequest$TagSpecifications": "The tags to apply to the new snapshot.
", "CreateCapacityReservationRequest$TagSpecifications": "The tags to apply to the Capacity Reservation during launch.
", "CreateClientVpnEndpointRequest$TagSpecifications": "The tags to apply to the Client VPN endpoint during creation.
", "CreateFleetRequest$TagSpecifications": "The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType
must be fleet
, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.
A list of attribute objects that meet the criteria of the request.
", "PutAttributesRequest$attributes": "The attributes to apply to your resource. You can specify up to 10 custom attributes per resource. You can specify up to 10 attributes in a single call.
", "PutAttributesResponse$attributes": "The attributes applied to your resource.
", - "RegisterContainerInstanceRequest$attributes": "The container instance attributes that this container instance supports.
" + "RegisterContainerInstanceRequest$attributes": "The container instance attributes that this container instance supports.
", + "Task$attributes": "The attributes of the task
" } }, "AwsVpcConfiguration": { @@ -169,8 +170,8 @@ "Container$exitCode": "The exit code returned from the container.
", "ContainerDefinition$memory": "The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory
value, if one is specified. This parameter maps to Memory
in the Create a container section of the Docker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory
and memoryReservation
value, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory
is used.
The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.
", "ContainerDefinition$memoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory
parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation
in the Create a container section of the Docker Remote API and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory
or memoryReservation
in a container definition. If you specify both, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory
is used.
For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation
of 128 MiB, and a memory
hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.
The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.
", - "ContainerDefinition$startTimeout": "Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE
, SUCCESS
, or HEALTHY
status. If a startTimeout
value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED
state.
For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.
", - "ContainerDefinition$stopTimeout": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the max stopTimeout
value is 2 minutes. This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.
For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT
container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE
, SUCCESS
, or HEALTHY
status. If a startTimeout
value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED
state.
For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
For tasks using the Fargate launch type, the task or service requires platform version 1.3.0
or later.
Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the max stopTimeout
value is 2 minutes and the task or service requires platform version 1.3.0
or later.
For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT
container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The number of cpu
units reserved for the container, instead of the default value from the task definition. You must also specify a container name.
The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.
", "ContainerOverride$memoryReservation": "The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name.
", @@ -327,7 +328,7 @@ "ContainerDependencies": { "base": null, "refs": { - "ContainerDefinition$dependsOn": "The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.
For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.
" + "ContainerDefinition$dependsOn": "The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.
For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
For tasks using the Fargate launch type, the task or service requires platform version 1.3.0
or later.
The Elastic Inference accelerators to use for the containers in the task.
", - "Task$inferenceAccelerators": "The Elastic Inference accelerator associated with the task.
" + "Task$inferenceAccelerators": "The Elastic Inference accelerator associated with the task.
", + "TaskDefinition$inferenceAccelerators": "The Elastic Inference accelerator associated with the task.
" } }, "Integer": { @@ -953,7 +955,7 @@ "LogDriver": { "base": null, "refs": { - "LogConfiguration$logDriver": "The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.
For tasks using the Fargate launch type, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks using the EC2 launch type, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, logentries
, syslog
, splunk
, and awsfirelens
.
For more information about using the awslogs
log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.
For tasks using the Fargate launch type, the supported log drivers are awslogs
and splunk
.
For tasks using the EC2 launch type, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, logentries
, syslog
, and splunk
.
For more information about using the awslogs
log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls
to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see Docker plugin discovery. This parameter maps to Driver
in the Create a volume section of the Docker Remote API and the xxdriver
option to docker volume create.
The Amazon Resource Name (ARN) of the failed resource.
", "Failure$reason": "The reason for the failure.
", + "Failure$detail": "The details of the failure.
", "FirelensConfigurationOptionsMap$key": null, "FirelensConfigurationOptionsMap$value": null, "GpuIds$member": null, @@ -1532,33 +1535,33 @@ "KeyValuePair$value": "The value of the key-value pair. For environment variables, this is the value of the environment variable.
", "ListAccountSettingsRequest$value": "The value of the account settings with which to filter results. You must also specify an account setting name to use this parameter.
", "ListAccountSettingsRequest$principalArn": "The ARN of the principal, which can be an IAM user, IAM role, or the root user. If this field is omitted, the account settings are listed only for the authenticated user.
", - "ListAccountSettingsRequest$nextToken": "The nextToken
value returned from a previous paginated ListAccountSettings
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListAccountSettings
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListAccountSettings
request. When the results of a ListAccountSettings
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed.
", "ListAttributesRequest$attributeName": "The name of the attribute with which to filter the results.
", "ListAttributesRequest$attributeValue": "The value of the attribute with which to filter results. You must also specify an attribute name to use this parameter.
", - "ListAttributesRequest$nextToken": "The nextToken
value returned from a previous paginated ListAttributes
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListAttributes
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListAttributes
request. When the results of a ListAttributes
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The nextToken
value returned from a previous paginated ListClusters
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListClusters
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListClusters
request. When the results of a ListClusters
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. If you do not specify a cluster, the default cluster is assumed.
", "ListContainerInstancesRequest$filter": "You can filter the results of a ListContainerInstances
operation with cluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.
The nextToken
value returned from a previous paginated ListContainerInstances
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListContainerInstances
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListContainerInstances
request. When the results of a ListContainerInstances
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. If you do not specify a cluster, the default cluster is assumed.
", - "ListServicesRequest$nextToken": "The nextToken
value returned from a previous paginated ListServices
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListServices
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListServices
request. When the results of a ListServices
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon ECS tasks, services, task definitions, clusters, and container instances.
", "ListTaskDefinitionFamiliesRequest$familyPrefix": "The familyPrefix
is a string that is used to filter the results of ListTaskDefinitionFamilies
. If you specify a familyPrefix
, only task definition family names that begin with the familyPrefix
string are returned.
The nextToken
value returned from a previous paginated ListTaskDefinitionFamilies
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListTaskDefinitionFamilies
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListTaskDefinitionFamilies
request. When the results of a ListTaskDefinitionFamilies
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The full family name with which to filter the ListTaskDefinitions
results. Specifying a familyPrefix
limits the listed task definitions to task definition revisions that belong to that family.
The nextToken
value returned from a previous paginated ListTaskDefinitions
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListTaskDefinitions
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListTaskDefinitions
request. When the results of a ListTaskDefinitions
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks to list. If you do not specify a cluster, the default cluster is assumed.
", "ListTasksRequest$containerInstance": "The container instance ID or full ARN of the container instance with which to filter the ListTasks
results. Specifying a containerInstance
limits the results to tasks that belong to that container instance.
The name of the family with which to filter the ListTasks
results. Specifying a family
limits the results to tasks that belong to that family.
The nextToken
value returned from a previous paginated ListTasks
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value returned from a ListTasks
request indicating that more results are available to fulfill the request and further calls will be needed. If maxResults
was provided, it is possible the number of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
The startedBy
value with which to filter the task results. Specifying a startedBy
value limits the results to tasks that were started with that value.
The name of the service with which to filter the ListTasks
results. Specifying a serviceName
limits the results to tasks that belong to that service.
The nextToken
value to include in a future ListTasks
request. When the results of a ListTasks
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return.
The type of the resource, such as INTEGER
, DOUBLE
, LONG
, or STRINGSET
.
The value for the specified resource type.
If the GPU
type is used, the value is the number of physical GPUs
the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.
If the InferenceAccelerator
type is used, the value
should match the deviceName
for an InferenceAccelerator specified in a task definition.
The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. If you do not specify a cluster, the default cluster is assumed.
", - "RunTaskRequest$taskDefinition": "The family
and revision
(family:revision
) or full ARN of the task definition to run. If a revision
is not specified, the latest ACTIVE
revision is used.
An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).
", "RunTaskRequest$platformVersion": "The platform version the task should run. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST
platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.
The reference ID to use for the task.
", + "RunTaskRequest$startedBy": "An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
The family
and revision
(family:revision
) or full ARN of the task definition to run. If a revision
is not specified, the latest ACTIVE
revision is used.
The name of the secret.
", "Secret$valueFrom": "The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.
If the AWS Systems Manager Parameter Store parameter exists in the same Region as the task you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.
Whether the account setting is enabled or disabled for the specified resource.
", "Setting$principalArn": "The ARN of the principal, which can be an IAM user, IAM role, or the root user. If this field is omitted, the authenticated user is assumed.
", "StartTaskRequest$cluster": "The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task. If you do not specify a cluster, the default cluster is assumed.
", - "StartTaskRequest$taskDefinition": "The family
and revision
(family:revision
) or full ARN of the task definition to start. If a revision
is not specified, the latest ACTIVE
revision is used.
An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).
", + "StartTaskRequest$referenceId": "The reference ID to use for the task.
", + "StartTaskRequest$startedBy": "An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
The family
and revision
(family:revision
) or full ARN of the task definition to start. If a revision
is not specified, the latest ACTIVE
revision is used.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. If you do not specify a cluster, the default cluster is assumed.
", "StopTaskRequest$task": "The task ID or full Amazon Resource Name (ARN) of the task to stop.
", "StopTaskRequest$reason": "An optional message specified when a task is stopped. For example, if you are using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message appears in subsequent DescribeTasks API operations on this task. Up to 255 characters are allowed in this message.
", @@ -1643,18 +1648,19 @@ "SystemControl$namespace": "The namespaced kernel parameter for which to set a value
.
The value for the namespaced kernel parameter specified in namespace
.
The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon ECS tasks, services, task definitions, clusters, and container instances.
", - "Task$taskArn": "The Amazon Resource Name (ARN) of the task.
", + "Task$availabilityZone": "The availability zone of the task.
", "Task$clusterArn": "The ARN of the cluster that hosts the task.
", - "Task$taskDefinitionArn": "The ARN of the task definition that creates the task.
", "Task$containerInstanceArn": "The ARN of the container instances that host the task.
", - "Task$lastStatus": "The last known status of the task. For more information, see Task Lifecycle.
", - "Task$desiredStatus": "The desired status of the task. For more information, see Task Lifecycle.
", "Task$cpu": "The number of CPU units used by the task as expressed in a task definition. It can be expressed as an integer using CPU units, for example 1024
. It can also be expressed as a string using vCPUs, for example 1 vCPU
or 1 vcpu
. String values are converted to an integer indicating the CPU units when the task definition is registered.
If you are using the EC2 launch type, this field is optional. Supported values are between 128
CPU units (0.125
vCPUs) and 10240
CPU units (10
vCPUs).
If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory
parameter:
256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
The desired status of the task. For more information, see Task Lifecycle.
", + "Task$group": "The name of the task group associated with the task.
", + "Task$lastStatus": "The last known status of the task. For more information, see Task Lifecycle.
", "Task$memory": "The amount of memory (in MiB) used by the task as expressed in a task definition. It can be expressed as an integer using MiB, for example 1024
. It can also be expressed as a string using GB, for example 1GB
or 1 GB
. String values are converted to an integer indicating the MiB when the task definition is registered.
If you are using the EC2 launch type, this field is optional.
If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu
parameter:
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST
platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.
The tag specified when a task is started. If the task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
The reason that the task was stopped.
", - "Task$group": "The name of the task group associated with the task.
", - "Task$platformVersion": "The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST
platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.
The Amazon Resource Name (ARN) of the task.
", + "Task$taskDefinitionArn": "The ARN of the task definition that creates the task.
", "TaskDefinition$taskDefinitionArn": "The full Amazon Resource Name (ARN) of the task definition.
", "TaskDefinition$family": "The name of a family that this task definition is registered to. A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.
", "TaskDefinition$taskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.
IAM roles for tasks on Windows require that the -EnableTaskIAMRole
option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
The number of cpu
units used by the task. If you are using the EC2 launch type, this field is optional and any value can be used. If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory
parameter:
256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
The amount (in MiB) of memory used by the task.
If using the EC2 launch type, this field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional.
If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu
parameter:
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.
", - "TaskOverride$taskRoleArn": "The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.
", + "TaskOverride$cpu": "The cpu override for the task.
", "TaskOverride$executionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.
", + "TaskOverride$memory": "The memory override for the task.
", + "TaskOverride$taskRoleArn": "The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.
", "TaskSet$id": "The ID of the task set.
", "TaskSet$taskSetArn": "The Amazon Resource Name (ARN) of the task set.
", "TaskSet$serviceArn": "The Amazon Resource Name (ARN) of the service the task set exists in.
", @@ -1970,13 +1978,13 @@ "SubmitTaskStateChangeRequest$pullStoppedAt": "The Unix timestamp for when the container image pull completed.
", "SubmitTaskStateChangeRequest$executionStoppedAt": "The Unix timestamp for when the task execution stopped.
", "Task$connectivityAt": "The Unix timestamp for when the task last went into CONNECTED
status.
The Unix timestamp for when the task was created (the task entered the PENDING
state).
The Unix timestamp for when the task execution stopped.
", "Task$pullStartedAt": "The Unix timestamp for when the container image pull began.
", "Task$pullStoppedAt": "The Unix timestamp for when the container image pull completed.
", - "Task$executionStoppedAt": "The Unix timestamp for when the task execution stopped.
", - "Task$createdAt": "The Unix timestamp for when the task was created (the task entered the PENDING
state).
The Unix timestamp for when the task started (the task transitioned from the PENDING
state to the RUNNING
state).
The Unix timestamp for when the task stops (transitions from the RUNNING
state to STOPPED
).
The Unix timestamp for when the task was stopped (the task transitioned from the RUNNING
state to the STOPPED
state).
The Unix timestamp for when the task stops (transitions from the RUNNING
state to STOPPED
).
The Unix timestamp for when the task set was created.
", "TaskSet$updatedAt": "The Unix timestamp for when the task set was last updated.
", "TaskSet$stabilityStatusAt": "The Unix timestamp for when the task set stability status was retrieved.
" diff --git a/models/apis/eks/2017-11-01/api-2.json b/models/apis/eks/2017-11-01/api-2.json index 9b4beae09e7..2b60a3dafdc 100644 --- a/models/apis/eks/2017-11-01/api-2.json +++ b/models/apis/eks/2017-11-01/api-2.json @@ -31,6 +31,24 @@ {"shape":"UnsupportedAvailabilityZoneException"} ] }, + "CreateNodegroup":{ + "name":"CreateNodegroup", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/node-groups" + }, + "input":{"shape":"CreateNodegroupRequest"}, + "output":{"shape":"CreateNodegroupResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -47,6 +65,23 @@ {"shape":"ServiceUnavailableException"} ] }, + "DeleteNodegroup":{ + "name":"DeleteNodegroup", + "http":{ + "method":"DELETE", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}" + }, + "input":{"shape":"DeleteNodegroupRequest"}, + "output":{"shape":"DeleteNodegroupResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DescribeCluster":{ "name":"DescribeCluster", "http":{ @@ -62,6 +97,22 @@ {"shape":"ServiceUnavailableException"} ] }, + "DescribeNodegroup":{ + "name":"DescribeNodegroup", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}" + }, + "input":{"shape":"DescribeNodegroupRequest"}, + "output":{"shape":"DescribeNodegroupResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ] + }, 
"DescribeUpdate":{ "name":"DescribeUpdate", "http":{ @@ -92,6 +143,22 @@ {"shape":"ServiceUnavailableException"} ] }, + "ListNodegroups":{ + "name":"ListNodegroups", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/node-groups" + }, + "input":{"shape":"ListNodegroupsRequest"}, + "output":{"shape":"ListNodegroupsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -179,9 +246,60 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidRequestException"} ] + }, + "UpdateNodegroupConfig":{ + "name":"UpdateNodegroupConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}/update-config" + }, + "input":{"shape":"UpdateNodegroupConfigRequest"}, + "output":{"shape":"UpdateNodegroupConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ] + }, + "UpdateNodegroupVersion":{ + "name":"UpdateNodegroupVersion", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}/update-version" + }, + "input":{"shape":"UpdateNodegroupVersionRequest"}, + "output":{"shape":"UpdateNodegroupVersionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ] } }, "shapes":{ + "AMITypes":{ + "type":"string", + "enum":[ + "AL2_x86_64", + "AL2_x86_64_GPU" + ] + }, + "AutoScalingGroup":{ + "type":"structure", + "members":{ + "name":{"shape":"String"} + } + }, + "AutoScalingGroupList":{ + "type":"list", 
+ "member":{"shape":"AutoScalingGroup"} + }, "BadRequestException":{ "type":"structure", "members":{ @@ -195,6 +313,15 @@ "type":"boolean", "box":true }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "Capacity":{ + "type":"integer", + "box":true, + "min":1 + }, "Certificate":{ "type":"structure", "members":{ @@ -205,6 +332,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, @@ -241,7 +369,8 @@ "CREATING", "ACTIVE", "DELETING", - "FAILED" + "FAILED", + "UPDATING" ] }, "CreateClusterRequest":{ @@ -270,6 +399,44 @@ "cluster":{"shape":"Cluster"} } }, + "CreateNodegroupRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName", + "subnets", + "nodeRole" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{"shape":"String"}, + "scalingConfig":{"shape":"NodegroupScalingConfig"}, + "diskSize":{"shape":"BoxedInteger"}, + "subnets":{"shape":"StringList"}, + "instanceTypes":{"shape":"StringList"}, + "amiType":{"shape":"AMITypes"}, + "remoteAccess":{"shape":"RemoteAccessConfig"}, + "nodeRole":{"shape":"String"}, + "labels":{"shape":"labelsMap"}, + "tags":{"shape":"TagMap"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + }, + "version":{"shape":"String"}, + "releaseVersion":{"shape":"String"} + } + }, + "CreateNodegroupResponse":{ + "type":"structure", + "members":{ + "nodegroup":{"shape":"Nodegroup"} + } + }, "DeleteClusterRequest":{ "type":"structure", "required":["name"], @@ -287,6 +454,31 @@ "cluster":{"shape":"Cluster"} } }, + "DeleteNodegroupRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + 
} + } + }, + "DeleteNodegroupResponse":{ + "type":"structure", + "members":{ + "nodegroup":{"shape":"Nodegroup"} + } + }, "DescribeClusterRequest":{ "type":"structure", "required":["name"], @@ -304,6 +496,31 @@ "cluster":{"shape":"Cluster"} } }, + "DescribeNodegroupRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + } + } + }, + "DescribeNodegroupResponse":{ + "type":"structure", + "members":{ + "nodegroup":{"shape":"Nodegroup"} + } + }, "DescribeUpdateRequest":{ "type":"structure", "required":[ @@ -320,6 +537,11 @@ "shape":"String", "location":"uri", "locationName":"updateId" + }, + "nodegroupName":{ + "shape":"String", + "location":"querystring", + "locationName":"nodegroupName" } } }, @@ -339,7 +561,10 @@ "AccessDenied", "OperationNotPermitted", "VpcIdNotFound", - "Unknown" + "Unknown", + "NodeCreationFailure", + "PodEvictionFailure", + "InsufficientFreeAddresses" ] }, "ErrorDetail":{ @@ -364,6 +589,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, @@ -373,11 +599,24 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, "exception":true }, + "Issue":{ + "type":"structure", + "members":{ + "code":{"shape":"NodegroupIssueCode"}, + "message":{"shape":"String"}, + "resourceIds":{"shape":"StringList"} + } + }, + "IssueList":{ + "type":"list", + "member":{"shape":"Issue"} + }, "ListClustersRequest":{ "type":"structure", "members":{ @@ -406,6 +645,40 @@ "nextToken":{"shape":"String"} } }, + "ListNodegroupsRequest":{ + "type":"structure", + "required":["clusterName"], + "members":{ + "clusterName":{ + 
"shape":"String", + "location":"uri", + "locationName":"name" + }, + "maxResults":{ + "shape":"ListNodegroupsRequestMaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNodegroupsRequestMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListNodegroupsResponse":{ + "type":"structure", + "members":{ + "nodegroups":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -432,6 +705,11 @@ "location":"uri", "locationName":"name" }, + "nodegroupName":{ + "shape":"String", + "location":"querystring", + "locationName":"nodegroupName" + }, "nextToken":{ "shape":"String", "location":"querystring", @@ -488,6 +766,80 @@ "clusterLogging":{"shape":"LogSetups"} } }, + "Nodegroup":{ + "type":"structure", + "members":{ + "nodegroupName":{"shape":"String"}, + "nodegroupArn":{"shape":"String"}, + "clusterName":{"shape":"String"}, + "version":{"shape":"String"}, + "releaseVersion":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "modifiedAt":{"shape":"Timestamp"}, + "status":{"shape":"NodegroupStatus"}, + "scalingConfig":{"shape":"NodegroupScalingConfig"}, + "instanceTypes":{"shape":"StringList"}, + "subnets":{"shape":"StringList"}, + "remoteAccess":{"shape":"RemoteAccessConfig"}, + "amiType":{"shape":"AMITypes"}, + "nodeRole":{"shape":"String"}, + "labels":{"shape":"labelsMap"}, + "resources":{"shape":"NodegroupResources"}, + "diskSize":{"shape":"BoxedInteger"}, + "health":{"shape":"NodegroupHealth"}, + "tags":{"shape":"TagMap"} + } + }, + "NodegroupHealth":{ + "type":"structure", + "members":{ + "issues":{"shape":"IssueList"} + } + }, + "NodegroupIssueCode":{ + "type":"string", + "enum":[ + "AutoScalingGroupNotFound", + "Ec2SecurityGroupNotFound", + "Ec2SecurityGroupDeletionFailure", + "Ec2LaunchTemplateNotFound", + 
"Ec2LaunchTemplateVersionMismatch", + "IamInstanceProfileNotFound", + "IamNodeRoleNotFound", + "AsgInstanceLaunchFailures", + "InstanceLimitExceeded", + "InsufficientFreeAddresses", + "AccessDenied", + "InternalFailure" + ] + }, + "NodegroupResources":{ + "type":"structure", + "members":{ + "autoScalingGroups":{"shape":"AutoScalingGroupList"}, + "remoteAccessSecurityGroup":{"shape":"String"} + } + }, + "NodegroupScalingConfig":{ + "type":"structure", + "members":{ + "minSize":{"shape":"Capacity"}, + "maxSize":{"shape":"Capacity"}, + "desiredSize":{"shape":"Capacity"} + } + }, + "NodegroupStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATE_FAILED", + "DELETE_FAILED", + "DEGRADED" + ] + }, "NotFoundException":{ "type":"structure", "members":{ @@ -502,10 +854,18 @@ "issuer":{"shape":"String"} } }, + "RemoteAccessConfig":{ + "type":"structure", + "members":{ + "ec2SshKey":{"shape":"String"}, + "sourceSecurityGroups":{"shape":"StringList"} + } + }, "ResourceInUseException":{ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":409}, @@ -515,6 +875,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, @@ -524,6 +885,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":404}, @@ -533,6 +895,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":500}, @@ -601,6 +964,7 @@ "members":{ "message":{"shape":"String"}, "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "validZones":{"shape":"StringList"} }, "error":{"httpStatusCode":400}, @@ -689,6 +1053,76 @@ 
"update":{"shape":"Update"} } }, + "UpdateLabelsPayload":{ + "type":"structure", + "members":{ + "addOrUpdateLabels":{"shape":"labelsMap"}, + "removeLabels":{"shape":"labelsKeyList"} + } + }, + "UpdateNodegroupConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + }, + "labels":{"shape":"UpdateLabelsPayload"}, + "scalingConfig":{"shape":"NodegroupScalingConfig"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + } + } + }, + "UpdateNodegroupConfigResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, + "UpdateNodegroupVersionRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + }, + "version":{"shape":"String"}, + "releaseVersion":{"shape":"String"}, + "force":{"shape":"Boolean"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + } + } + }, + "UpdateNodegroupVersionResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, "UpdateParam":{ "type":"structure", "members":{ @@ -703,7 +1137,13 @@ "PlatformVersion", "EndpointPrivateAccess", "EndpointPublicAccess", - "ClusterLogging" + "ClusterLogging", + "DesiredSize", + "LabelsToAdd", + "LabelsToRemove", + "MaxSize", + "MinSize", + "ReleaseVersion" ] }, "UpdateParams":{ @@ -724,7 +1164,8 @@ "enum":[ "VersionUpdate", "EndpointAccessUpdate", - "LoggingUpdate" + "LoggingUpdate", + "ConfigUpdate" ] }, "VpcConfigRequest":{ @@ -741,10 +1182,30 @@ "members":{ "subnetIds":{"shape":"StringList"}, "securityGroupIds":{"shape":"StringList"}, + 
"clusterSecurityGroupId":{"shape":"String"}, "vpcId":{"shape":"String"}, "endpointPublicAccess":{"shape":"Boolean"}, "endpointPrivateAccess":{"shape":"Boolean"} } + }, + "labelKey":{ + "type":"string", + "max":63, + "min":1 + }, + "labelValue":{ + "type":"string", + "max":253, + "min":1 + }, + "labelsKeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "labelsMap":{ + "type":"map", + "key":{"shape":"labelKey"}, + "value":{"shape":"labelValue"} } } } diff --git a/models/apis/eks/2017-11-01/docs-2.json b/models/apis/eks/2017-11-01/docs-2.json index 307719237fc..960da577fa5 100644 --- a/models/apis/eks/2017-11-01/docs-2.json +++ b/models/apis/eks/2017-11-01/docs-2.json @@ -3,18 +3,43 @@ "service": "Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.
Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.
", "operations": { "CreateCluster": "Creates an Amazon EKS control plane.
The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd
and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique and runs on its own set of Amazon EC2 instances.
The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support kubectl exec
, logs
, and proxy
data flows).
Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.
You can use the endpointPublicAccess
and endpointPrivateAccess
parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .
You can use the logging
parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.
Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS Worker Nodes in the Amazon EKS User Guide.
", - "DeleteCluster": "Deletes the Amazon EKS cluster control plane.
If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see Deleting a Cluster in the Amazon EKS User Guide.
Creates a managed worker node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster.
An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS-optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.
", + "DeleteCluster": "Deletes the Amazon EKS cluster control plane.
If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see Deleting a Cluster in the Amazon EKS User Guide.
If you have managed node groups attached to the cluster, you must delete them first. For more information, see DeleteNodegroup.
", + "DeleteNodegroup": "Deletes an Amazon EKS node group for a cluster.
", "DescribeCluster": "Returns descriptive information about an Amazon EKS cluster.
The API server endpoint and certificate authority data returned by this operation are required for kubelet
and kubectl
to communicate with your Kubernetes API server. For more information, see Create a kubeconfig for Amazon EKS.
The API server endpoint and certificate authority data aren't available until the cluster reaches the ACTIVE
state.
Returns descriptive information about an update against your Amazon EKS cluster.
When the status of the update is Succeeded
, the update is complete. If an update fails, the status is Failed
, and an error detail explains the reason for the failure.
Returns descriptive information about an Amazon EKS node group.
", + "DescribeUpdate": "Returns descriptive information about an update against your Amazon EKS cluster or associated managed node group.
When the status of the update is Succeeded
, the update is complete. If an update fails, the status is Failed
, and an error detail explains the reason for the failure.
Lists the Amazon EKS clusters in your AWS account in the specified Region.
", + "ListNodegroups": "Lists the Amazon EKS node groups associated with the specified cluster in your AWS account in the specified Region.
", "ListTagsForResource": "List the tags for an Amazon EKS resource.
", - "ListUpdates": "Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.
", - "TagResource": "Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.
Lists the updates associated with an Amazon EKS cluster or managed node group in your AWS account, in the specified Region.
", + "TagResource": "Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the cluster. For example, if you tag a cluster with this operation, that tag does not automatically propagate to the subnets and worker nodes associated with the cluster.
Deletes specified tags from a resource.
", "UpdateClusterConfig": "Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.
You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .
At this time, you can not update the subnets or security group IDs for an existing cluster.
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING
(this status transition is eventually consistent). When the update is complete (either Failed
or Successful
), the cluster status moves to Active
.
Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING
(this status transition is eventually consistent). When the update is complete (either Failed
or Successful
), the cluster status moves to Active
.
Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING
(this status transition is eventually consistent). When the update is complete (either Failed
or Successful
), the cluster status moves to Active
.
If your cluster has managed node groups attached to it, all of your node groups’ Kubernetes versions must match the cluster’s Kubernetes version in order to update the cluster to a new Kubernetes version.
", + "UpdateNodegroupConfig": "Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes labels for a node group or the scaling configuration.
", + "UpdateNodegroupVersion": "Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.
You can update to the latest available AMI version of a node group's current Kubernetes version by not specifying a Kubernetes version in the request. You can update to the latest AMI version of your cluster's current Kubernetes version by specifying your cluster's Kubernetes version in the request. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.
You cannot roll back a node group to an earlier Kubernetes version or AMI version.
When a node in a managed node group is terminated due to a scaling action or update, the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully and will fail if it is unable to do so. You can force
the update if Amazon EKS is unable to drain the nodes as a result of a pod disruption budget issue.
The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU
AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support; non-GPU instances should use the AL2_x86_64
AMI type, which uses the Amazon EKS-optimized Linux AMI.
The AMI type associated with your node group. GPU instance types should use the AL2_x86_64_GPU
AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support; non-GPU instances should use the AL2_x86_64
AMI type, which uses the Amazon EKS-optimized Linux AMI.
An AutoScaling group that is associated with an Amazon EKS managed node group.
", + "refs": { + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "NodegroupResources$autoScalingGroups": "The autoscaling groups associated with the node group.
" + } + }, "BadRequestException": { "base": "This exception is thrown if the request contains a semantic error. The precise meaning will depend on the API, and will be documented in the error message.
", "refs": { @@ -23,6 +48,7 @@ "Boolean": { "base": null, "refs": { + "UpdateNodegroupVersionRequest$force": "Force the update if the existing node group's pods are unable to be drained due to a pod disruption budget issue. If a previous update fails because pods could not be drained, you can force the update after it fails to terminate the old node regardless of whether or not any pods are running on the node.
", "VpcConfigResponse$endpointPublicAccess": "This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can receive only requests that originate from within the cluster VPC.
", "VpcConfigResponse$endpointPrivateAccess": "This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet.
" } @@ -35,6 +61,21 @@ "VpcConfigRequest$endpointPrivateAccess": "Set this value to true
to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false
, which disables private access for your Kubernetes API server. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .
The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB.
", + "Nodegroup$diskSize": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB.
" + } + }, + "Capacity": { + "base": null, + "refs": { + "NodegroupScalingConfig$minSize": "The minimum number of worker nodes that the managed node group can scale in to. This number must be greater than zero.
", + "NodegroupScalingConfig$maxSize": "The maximum number of worker nodes that the managed node group can scale out to. Managed node groups can support up to 100 nodes by default.
", + "NodegroupScalingConfig$desiredSize": "The current number of worker nodes that the managed node group should maintain.
" + } + }, "Certificate": { "base": "An object representing the certificate-authority-data
for your cluster.
An object representing an issue with an Amazon EKS resource.
", + "refs": { + "IssueList$member": null + } + }, + "IssueList": { + "base": null, + "refs": { + "NodegroupHealth$issues": "Any issues that are associated with the node group.
" + } + }, "ListClustersRequest": { "base": null, "refs": { @@ -156,6 +239,22 @@ "refs": { } }, + "ListNodegroupsRequest": { + "base": null, + "refs": { + } + }, + "ListNodegroupsRequestMaxResults": { + "base": null, + "refs": { + "ListNodegroupsRequest$maxResults": "The maximum number of node group results returned by ListNodegroups
in paginated output. When you use this parameter, ListNodegroups
returns only maxResults
results in a single page along with a nextToken
response element. You can see the remaining results of the initial request by sending another ListNodegroups
request with the returned nextToken
value. This value can be between 1 and 100. If you don't use this parameter, ListNodegroups
returns up to 100 results and a nextToken
value if applicable.
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.
An object representing an Amazon EKS managed node group.
", + "refs": { + "CreateNodegroupResponse$nodegroup": "The full description of your new node group.
", + "DeleteNodegroupResponse$nodegroup": "The full description of your deleted node group.
", + "DescribeNodegroupResponse$nodegroup": "The full description of your node group.
" + } + }, + "NodegroupHealth": { + "base": "An object representing the health status of the node group.
", + "refs": { + "Nodegroup$health": "The health status of the node group. If there are issues with your node group's health, they are listed here.
" + } + }, + "NodegroupIssueCode": { + "base": null, + "refs": { + "Issue$code": "A brief description of the error.
AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.
Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.
Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.
Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.
Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the Amazon EKS-created version to recover.
IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.
IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.
AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.
InstanceLimitExceeded: Your AWS account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.
InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.
AccessDenied: Amazon EKS and or one or more of your managed nodes is unable to communicate with your cluster API server.
InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.
An object representing the resources associated with the nodegroup, such as AutoScaling groups and security groups for remote access.
", + "refs": { + "Nodegroup$resources": "The resources associated with the nodegroup, such as AutoScaling groups and security groups for remote access.
" + } + }, + "NodegroupScalingConfig": { + "base": "An object representing the scaling configuration details for the AutoScaling group that is associated with your node group.
", + "refs": { + "CreateNodegroupRequest$scalingConfig": "The scaling configuration details for the AutoScaling group that is created for your node group.
", + "Nodegroup$scalingConfig": "The scaling configuration details for the AutoScaling group that is associated with your node group.
", + "UpdateNodegroupConfigRequest$scalingConfig": "The scaling configuration details for the AutoScaling group after the update.
" + } + }, + "NodegroupStatus": { + "base": null, + "refs": { + "Nodegroup$status": "The current status of the managed node group.
" + } + }, "NotFoundException": { "base": "A service resource associated with the request could not be found. Clients should not retry such requests.
", "refs": { @@ -225,6 +364,13 @@ "Identity$oidc": "The OpenID Connect identity provider information for the cluster.
" } }, + "RemoteAccessConfig": { + "base": "An object representing the remote access configuration for the managed node group.
", + "refs": { + "CreateNodegroupRequest$remoteAccess": "The remote access (SSH) configuration to use with your node group.
", + "Nodegroup$remoteAccess": "The remote access (SSH) configuration that is associated with the node group.
" + } + }, "ResourceInUseException": { "base": "The specified resource is in use.
", "refs": { @@ -236,7 +382,7 @@ } }, "ResourceNotFoundException": { - "base": "The specified resource could not be found. You can view your available clusters with ListClusters. Amazon EKS clusters are Region-specific.
", + "base": "The specified resource could not be found. You can view your available clusters with ListClusters. You can view your available managed node groups with ListNodegroups. Amazon EKS clusters and node groups are Region-specific.
", "refs": { } }, @@ -253,9 +399,11 @@ "String": { "base": null, "refs": { + "AutoScalingGroup$name": "The name of the AutoScaling group associated with an Amazon EKS managed node group.
", "BadRequestException$message": null, "Certificate$data": "The Base64-encoded certificate data required to communicate with your cluster. Add this to the certificate-authority-data
section of the kubeconfig
file for your cluster.
The Amazon EKS cluster associated with the exception.
", + "ClientException$nodegroupName": null, "ClientException$message": null, "Cluster$name": "The name of the cluster.
", "Cluster$arn": "The Amazon Resource Name (ARN) of the cluster.
", @@ -267,53 +415,101 @@ "CreateClusterRequest$version": "The desired Kubernetes version for your cluster. If you don't specify a value here, the latest version available in Amazon EKS is used.
", "CreateClusterRequest$roleArn": "The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the Amazon EKS User Guide .
", "CreateClusterRequest$clientRequestToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", + "CreateNodegroupRequest$clusterName": "The name of the cluster to create the node group in.
", + "CreateNodegroupRequest$nodegroupName": "The unique name to give your node group.
", + "CreateNodegroupRequest$nodeRole": "The IAM role associated with your node group. The Amazon EKS worker node kubelet
daemon makes calls to AWS APIs on your behalf. Worker nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch worker nodes and register them into a cluster, you must create an IAM role for those worker nodes to use when they are launched. For more information, see Amazon EKS Worker Node IAM Role in the Amazon EKS User Guide .
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", + "CreateNodegroupRequest$version": "The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value.
", + "CreateNodegroupRequest$releaseVersion": "The AMI version of the Amazon EKS-optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.
", "DeleteClusterRequest$name": "The name of the cluster to delete.
", + "DeleteNodegroupRequest$clusterName": "The name of the Amazon EKS cluster that is associated with your node group.
", + "DeleteNodegroupRequest$nodegroupName": "The name of the node group to delete.
", "DescribeClusterRequest$name": "The name of the cluster to describe.
", - "DescribeUpdateRequest$name": "The name of the Amazon EKS cluster to update.
", + "DescribeNodegroupRequest$clusterName": "The name of the Amazon EKS cluster associated with the node group.
", + "DescribeNodegroupRequest$nodegroupName": "The name of the node group to describe.
", + "DescribeUpdateRequest$name": "The name of the Amazon EKS cluster associated with the update.
", "DescribeUpdateRequest$updateId": "The ID of the update to describe.
", + "DescribeUpdateRequest$nodegroupName": "The name of the Amazon EKS node group associated with the update.
", "ErrorDetail$errorMessage": "A more complete description of the error.
", "InvalidParameterException$clusterName": "The Amazon EKS cluster associated with the exception.
", + "InvalidParameterException$nodegroupName": null, "InvalidParameterException$message": null, "InvalidRequestException$clusterName": "The Amazon EKS cluster associated with the exception.
", + "InvalidRequestException$nodegroupName": null, "InvalidRequestException$message": null, + "Issue$message": "The error message associated with the issue.
", "ListClustersRequest$nextToken": "The nextToken
value returned from a previous paginated ListClusters
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.
The nextToken
value to include in a future ListClusters
request. When the results of a ListClusters
request exceed maxResults
, you can use this value to retrieve the next page of results. This value is null
when there are no more results to return.
The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon EKS clusters.
", + "ListNodegroupsRequest$clusterName": "The name of the Amazon EKS cluster that you would like to list node groups in.
", + "ListNodegroupsRequest$nextToken": "The nextToken
value returned from a previous paginated ListNodegroups
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
The nextToken
value to include in a future ListNodegroups
request. When the results of a ListNodegroups
request exceed maxResults
, you can use this value to retrieve the next page of results. This value is null
when there are no more results to return.
The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon EKS clusters and managed node groups.
", "ListUpdatesRequest$name": "The name of the Amazon EKS cluster to list updates for.
", + "ListUpdatesRequest$nodegroupName": "The name of the Amazon EKS managed node group to list updates for.
", "ListUpdatesRequest$nextToken": "The nextToken
value returned from a previous paginated ListUpdates
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value.
The nextToken
value to include in a future ListUpdates
request. When the results of a ListUpdates
request exceed maxResults
, you can use this value to retrieve the next page of results. This value is null
when there are no more results to return.
The name associated with an Amazon EKS managed node group.
", + "Nodegroup$nodegroupArn": "The Amazon Resource Name (ARN) associated with the managed node group.
", + "Nodegroup$clusterName": "The name of the cluster that the managed node group resides in.
", + "Nodegroup$version": "The Kubernetes version of the managed node group.
", + "Nodegroup$releaseVersion": "The AMI version of the managed node group. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.
", + "Nodegroup$nodeRole": "The IAM role associated with your node group. The Amazon EKS worker node kubelet
daemon makes calls to AWS APIs on your behalf. Worker nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch worker nodes and register them into a cluster, you must create an IAM role for those worker nodes to use when they are launched. For more information, see Amazon EKS Worker Node IAM Role in the Amazon EKS User Guide .
The remote access security group associated with the node group. This security group controls SSH access to the worker nodes.
", "NotFoundException$message": null, "OIDC$issuer": "The issuer URL for the OpenID Connect identity provider.
", + "RemoteAccessConfig$ec2SshKey": "The Amazon EC2 SSH key that provides access for SSH communication with the worker nodes in the managed node group. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide for Linux Instances.
", "ResourceInUseException$clusterName": "The Amazon EKS cluster associated with the exception.
", + "ResourceInUseException$nodegroupName": null, "ResourceInUseException$message": null, "ResourceLimitExceededException$clusterName": "The Amazon EKS cluster associated with the exception.
", + "ResourceLimitExceededException$nodegroupName": null, "ResourceLimitExceededException$message": null, "ResourceNotFoundException$clusterName": "The Amazon EKS cluster associated with the exception.
", + "ResourceNotFoundException$nodegroupName": null, "ResourceNotFoundException$message": null, "ServerException$clusterName": "The Amazon EKS cluster associated with the exception.
", + "ServerException$nodegroupName": null, "ServerException$message": null, "ServiceUnavailableException$message": null, "StringList$member": null, - "TagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon EKS clusters.
", + "TagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon EKS clusters and managed node groups.
", "UnsupportedAvailabilityZoneException$message": null, "UnsupportedAvailabilityZoneException$clusterName": "The Amazon EKS cluster associated with the exception.
", - "UntagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resources are Amazon EKS clusters.
", + "UnsupportedAvailabilityZoneException$nodegroupName": null, + "UntagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resources are Amazon EKS clusters and managed node groups.
", "Update$id": "A UUID that is used to track the update.
", "UpdateClusterConfigRequest$name": "The name of the Amazon EKS cluster to update.
", "UpdateClusterConfigRequest$clientRequestToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", "UpdateClusterVersionRequest$name": "The name of the Amazon EKS cluster to update.
", "UpdateClusterVersionRequest$version": "The desired Kubernetes version following a successful update.
", "UpdateClusterVersionRequest$clientRequestToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", + "UpdateNodegroupConfigRequest$clusterName": "The name of the Amazon EKS cluster that the managed node group resides in.
", + "UpdateNodegroupConfigRequest$nodegroupName": "The name of the managed node group to update.
", + "UpdateNodegroupConfigRequest$clientRequestToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", + "UpdateNodegroupVersionRequest$clusterName": "The name of the Amazon EKS cluster that is associated with the managed node group to update.
", + "UpdateNodegroupVersionRequest$nodegroupName": "The name of the managed node group to update.
", + "UpdateNodegroupVersionRequest$version": "The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version.
", + "UpdateNodegroupVersionRequest$releaseVersion": "The AMI version of the Amazon EKS-optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.
", + "UpdateNodegroupVersionRequest$clientRequestToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", "UpdateParam$value": "The value of the keys submitted as part of an update request.
", - "VpcConfigResponse$vpcId": "The VPC associated with your cluster.
" + "VpcConfigResponse$clusterSecurityGroupId": "The cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control plane to data plane communication.
", + "VpcConfigResponse$vpcId": "The VPC associated with your cluster.
", + "labelsKeyList$member": null } }, "StringList": { "base": null, "refs": { + "CreateNodegroupRequest$subnets": "The subnets to use for the AutoScaling group that is created for your node group. These subnets must have the tag key kubernetes.io/cluster/CLUSTER_NAME
with a value of shared
, where CLUSTER_NAME
is replaced with the name of your cluster.
The instance type to use for your node group. Currently, you can specify a single instance type for a node group. The default value for this parameter is t3.medium
. If you choose a GPU instance type, be sure to specify the AL2_x86_64_GPU
with the amiType
parameter.
An optional field that contains the resource IDs associated with the error.
", + "Issue$resourceIds": "The AWS resources that are afflicted by this issue.
", "ListClustersResponse$clusters": "A list of all of the clusters for your account in the specified Region.
", + "ListNodegroupsResponse$nodegroups": "A list of all of the node groups associated with the specified cluster.
", "ListUpdatesResponse$updateIds": "A list of all the updates for the specified cluster and Region.
", + "Nodegroup$instanceTypes": "The instance types associated with your node group.
", + "Nodegroup$subnets": "The subnets allowed for the AutoScaling group that is associated with your node group. These subnets must have the following tag: kubernetes.io/cluster/CLUSTER_NAME
, where CLUSTER_NAME
is replaced with the name of your cluster.
The security groups to allow SSH access (port 22) from on the worker nodes. If you specify an Amazon EC2 SSH key, but you do not specify a source security group when you create a managed node group, port 22 on the worker nodes is opened to the internet (0.0.0.0/0). For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.
", "UnsupportedAvailabilityZoneException$validZones": "The supported Availability Zones for your account. Choose subnets in these Availability Zones for your cluster.
", "VpcConfigRequest$subnetIds": "Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.
", "VpcConfigRequest$securityGroupIds": "Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify a security group, the default security group for your VPC is used.
", @@ -337,9 +533,11 @@ "TagMap": { "base": null, "refs": { - "Cluster$tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.
", + "Cluster$tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Cluster tags do not propagate to any other resources associated with the cluster.
", "CreateClusterRequest$tags": "The metadata to apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.
", + "CreateNodegroupRequest$tags": "The metadata to apply to the node group to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Node group tags do not propagate to any other resources associated with the node group, such as the Amazon EC2 instances or subnets.
", "ListTagsForResourceResponse$tags": "The tags for the resource.
", + "Nodegroup$tags": "The metadata applied the node group to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Node group tags do not propagate to any other resources associated with the node group, such as the Amazon EC2 instances or subnets.
", "TagResourceRequest$tags": "The tags to add to the resource. A tag is an array of key-value pairs.
" } }, @@ -363,6 +561,8 @@ "base": null, "refs": { "Cluster$createdAt": "The Unix epoch timestamp in seconds for when the cluster was created.
", + "Nodegroup$createdAt": "The Unix epoch timestamp in seconds for when the managed node group was created.
", + "Nodegroup$modifiedAt": "The Unix epoch timestamp in seconds for when the managed node group was last modified.
", "Update$createdAt": "The Unix epoch timestamp in seconds for when the update was created.
" } }, @@ -386,7 +586,9 @@ "refs": { "DescribeUpdateResponse$update": "The full description of the specified update.
", "UpdateClusterConfigResponse$update": null, - "UpdateClusterVersionResponse$update": "The full description of the specified update
" + "UpdateClusterVersionResponse$update": "The full description of the specified update
", + "UpdateNodegroupConfigResponse$update": null, + "UpdateNodegroupVersionResponse$update": null } }, "UpdateClusterConfigRequest": { @@ -409,6 +611,32 @@ "refs": { } }, + "UpdateLabelsPayload": { + "base": "An object representing a Kubernetes label change for a managed node group.
", + "refs": { + "UpdateNodegroupConfigRequest$labels": "The Kubernetes labels to be applied to the nodes in the node group after the update.
" + } + }, + "UpdateNodegroupConfigRequest": { + "base": null, + "refs": { + } + }, + "UpdateNodegroupConfigResponse": { + "base": null, + "refs": { + } + }, + "UpdateNodegroupVersionRequest": { + "base": null, + "refs": { + } + }, + "UpdateNodegroupVersionResponse": { + "base": null, + "refs": { + } + }, "UpdateParam": { "base": "An object representing the details of an update request.
", "refs": { @@ -451,6 +679,32 @@ "refs": { "Cluster$resourcesVpcConfig": "The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide.
" } + }, + "labelKey": { + "base": null, + "refs": { + "labelsMap$key": null + } + }, + "labelValue": { + "base": null, + "refs": { + "labelsMap$value": null + } + }, + "labelsKeyList": { + "base": null, + "refs": { + "UpdateLabelsPayload$removeLabels": "Kubernetes labels to be removed.
" + } + }, + "labelsMap": { + "base": null, + "refs": { + "CreateNodegroupRequest$labels": "The Kubernetes labels to be applied to the nodes in the node group when they are created.
", + "Nodegroup$labels": "The Kubernetes labels applied to the nodes in the node group.
Only labels that are applied with the Amazon EKS API are shown here. There may be other Kubernetes labels applied to the nodes in this group.
Kubernetes labels to be added or updated.
" + } } } } diff --git a/models/apis/eks/2017-11-01/examples-1.json b/models/apis/eks/2017-11-01/examples-1.json index 6a83da723a4..8ea2517578d 100644 --- a/models/apis/eks/2017-11-01/examples-1.json +++ b/models/apis/eks/2017-11-01/examples-1.json @@ -109,6 +109,27 @@ "id": "to-list-your-available-clusters-1527868801040", "title": "To list your available clusters" } + ], + "ListTagsForResource": [ + { + "input": { + "resourceArn": "arn:aws:eks:us-west-2:012345678910:cluster/beta" + }, + "output": { + "tags": { + "aws:tag:domain": "beta" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example lists all of the tags for the `beta` cluster.", + "id": "to-list-tags-for-a-cluster-1568666903378", + "title": "To list tags for a cluster" + } ] } } diff --git a/models/apis/eks/2017-11-01/paginators-1.json b/models/apis/eks/2017-11-01/paginators-1.json index abd2c6bdec9..662c51e44bb 100644 --- a/models/apis/eks/2017-11-01/paginators-1.json +++ b/models/apis/eks/2017-11-01/paginators-1.json @@ -6,6 +6,12 @@ "output_token": "nextToken", "result_key": "clusters" }, + "ListNodegroups": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "nodegroups" + }, "ListUpdates": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/models/apis/eks/2017-11-01/waiters-2.json b/models/apis/eks/2017-11-01/waiters-2.json index c325e521f59..449d2296c39 100644 --- a/models/apis/eks/2017-11-01/waiters-2.json +++ b/models/apis/eks/2017-11-01/waiters-2.json @@ -49,6 +49,43 @@ "state": "success" } ] + }, + "NodegroupActive": { + "delay": 30, + "operation": "DescribeNodegroup", + "maxAttempts": 80, + "acceptors": [ + { + "expected": "CREATE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "nodegroup.status" + }, + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "nodegroup.status" + } + ] + }, + "NodegroupDeleted": { + "delay": 30, + 
"operation": "DescribeNodegroup", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "DELETE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "nodegroup.status" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + } + ] } } } diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json index c05766c4e4e..0254075cbe3 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json @@ -73,7 +73,8 @@ {"shape":"TooManyRegistrationsForTargetIdException"}, {"shape":"TooManyTargetsException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "CreateLoadBalancer":{ @@ -127,7 +128,8 @@ {"shape":"TooManyTargetsException"}, {"shape":"UnsupportedProtocolException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "CreateTargetGroup":{ @@ -424,7 +426,8 @@ {"shape":"TooManyRegistrationsForTargetIdException"}, {"shape":"TooManyTargetsException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "ModifyLoadBalancerAttributes":{ @@ -464,7 +467,8 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"UnsupportedProtocolException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "ModifyTargetGroup":{ @@ -635,7 +639,8 @@ 
"AuthenticateCognitoConfig":{"shape":"AuthenticateCognitoActionConfig"}, "Order":{"shape":"ActionOrder"}, "RedirectConfig":{"shape":"RedirectActionConfig"}, - "FixedResponseConfig":{"shape":"FixedResponseActionConfig"} + "FixedResponseConfig":{"shape":"FixedResponseActionConfig"}, + "ForwardConfig":{"shape":"ForwardActionConfig"} } }, "ActionOrder":{ @@ -1242,6 +1247,13 @@ "type":"string", "pattern":"^(2|4|5)\\d\\d$" }, + "ForwardActionConfig":{ + "type":"structure", + "members":{ + "TargetGroups":{"shape":"TargetGroupList"}, + "TargetGroupStickinessConfig":{"shape":"TargetGroupStickinessConfig"} + } + }, "HealthCheckEnabled":{"type":"boolean"}, "HealthCheckIntervalSeconds":{ "type":"integer", @@ -2106,6 +2118,10 @@ "type":"list", "member":{"shape":"TargetGroupAttribute"} }, + "TargetGroupList":{ + "type":"list", + "member":{"shape":"TargetGroupTuple"} + }, "TargetGroupName":{"type":"string"}, "TargetGroupNames":{ "type":"list", @@ -2122,6 +2138,23 @@ }, "exception":true }, + "TargetGroupStickinessConfig":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"TargetGroupStickinessEnabled"}, + "DurationSeconds":{"shape":"TargetGroupStickinessDurationSeconds"} + } + }, + "TargetGroupStickinessDurationSeconds":{"type":"integer"}, + "TargetGroupStickinessEnabled":{"type":"boolean"}, + "TargetGroupTuple":{ + "type":"structure", + "members":{ + "TargetGroupArn":{"shape":"TargetGroupArn"}, + "Weight":{"shape":"TargetGroupWeight"} + } + }, + "TargetGroupWeight":{"type":"integer"}, "TargetGroups":{ "type":"list", "member":{"shape":"TargetGroup"} @@ -2282,6 +2315,17 @@ }, "exception":true }, + "TooManyUniqueTargetGroupsPerLoadBalancerException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyUniqueTargetGroupsPerLoadBalancer", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "UnsupportedProtocolException":{ "type":"structure", "members":{ diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json 
b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json index 3c7ac399ac2..8186e523977 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json @@ -24,9 +24,9 @@ "DescribeTargetGroupAttributes": "Describes the attributes for the specified target group.
For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide.
", "DescribeTargetGroups": "Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups.
To describe the targets for a target group, use DescribeTargetHealth. To describe the attributes of a target group, use DescribeTargetGroupAttributes.
", "DescribeTargetHealth": "Describes the health of the specified targets or all of your targets.
", - "ModifyListener": "Modifies the specified properties of the specified listener.
Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.
", + "ModifyListener": "Replaces the specified properties of the specified listener. Any properties that you do not specify remain unchanged.
Changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.
To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.
", "ModifyLoadBalancerAttributes": "Modifies the specified attributes of the specified Application Load Balancer or Network Load Balancer.
If any of the specified attributes can't be modified as requested, the call fails. Any existing attributes that you do not modify retain their current values.
", - "ModifyRule": "Modifies the specified rule.
Any existing properties that you do not modify retain their current values.
To modify the actions for the default rule, use ModifyListener.
", + "ModifyRule": "Replaces the specified properties of the specified rule. Any properties that you do not specify are unchanged.
To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.
To modify the actions for the default rule, use ModifyListener.
", "ModifyTargetGroup": "Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth.
", "ModifyTargetGroupAttributes": "Modifies the specified attributes of the specified target group.
", "RegisterTargets": "Registers the specified targets with the specified target group.
If the target is an EC2 instance, it must be in the running
state when you register it.
By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.
With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.
To remove a target from a target group, use DeregisterTargets.
", @@ -59,11 +59,11 @@ "Actions": { "base": null, "refs": { - "CreateListenerInput$DefaultActions": "The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.
If the action type is forward
, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions. Each rule must include exactly one of the following types of actions: forward
, fixed-response
, or redirect
, and it must be the last action to be performed.
If the action type is forward
, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.
If the action type is forward
, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions. Each rule must include exactly one of the following types of actions: forward
, fixed-response
, or redirect
, and it must be the last action to be performed.
If the action type is forward
, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The default actions for the listener.
", - "ModifyListenerInput$DefaultActions": "The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.
If the action type is forward
, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions. Each rule must include exactly one of the following types of actions: forward
, fixed-response
, or redirect
, and it must be the last action to be performed.
If the action type is forward
, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.
If the action type is forward
, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions. Each rule must include exactly one of the following types of actions: forward
, fixed-response
, or redirect
, and it must be the last action to be performed.
If the action type is forward
, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.
[HTTPS listeners] If the action type is authenticate-oidc
, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.
[HTTPS listeners] If the action type is authenticate-cognito
, you authenticate users through the user pools supported by Amazon Cognito.
[Application Load Balancer] If the action type is redirect
, you redirect specified client requests from one URL to another.
[Application Load Balancer] If the action type is fixed-response
, you drop specified client requests and return a custom HTTP response.
The actions. Each rule must include exactly one of the following types of actions: forward
, redirect
, or fixed-response
, and it must be the last action to be performed.
The HTTP response code (2XX, 4XX, or 5XX).
" } }, + "ForwardActionConfig": { + "base": "Information about a forward action.
", + "refs": { + "Action$ForwardConfig": "Information for creating an action that distributes requests among one or more target groups. For Network Load Balancers, you can specify a single target group. Specify only when Type
is forward
. If you specify both ForwardConfig
and TargetGroupArn
, you can specify only one target group using ForwardConfig
and it must be the same target group specified in TargetGroupArn
.
The name of the attribute.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is true
.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The value is true
or false
. The default is true
.
The following attributes are supported by only Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The value is true
or false
. The default is false
.
The name of the attribute.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The value is true
or false
. The default is true
.
The following attributes are supported by only Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The value is true
or false
. The default is false
.
The name of the limit. The possible values are:
application-load-balancers
listeners-per-application-load-balancer
listeners-per-network-load-balancer
network-load-balancers
rules-per-application-load-balancer
target-groups
targets-per-application-load-balancer
targets-per-availability-zone-per-network-load-balancer
targets-per-network-load-balancer
The name of the limit. The possible values are:
application-load-balancers
listeners-per-application-load-balancer
listeners-per-network-load-balancer
network-load-balancers
rules-per-application-load-balancer
target-groups
target-groups-per-action-on-application-load-balancer
target-groups-per-action-on-network-load-balancer
target-groups-per-application-load-balancer
targets-per-application-load-balancer
targets-per-availability-zone-per-network-load-balancer
targets-per-network-load-balancer
The Amazon Resource Name (ARN) of the target group. Specify only when Type
is forward
.
The Amazon Resource Name (ARN) of the target group. Specify only when Type
is forward
and you want to route to a single target group. To route to one or more target groups, use ForwardConfig
instead.
The Amazon Resource Name (ARN) of the target group.
", "DeregisterTargetsInput$TargetGroupArn": "The Amazon Resource Name (ARN) of the target group.
", "DescribeTargetGroupAttributesInput$TargetGroupArn": "The Amazon Resource Name (ARN) of the target group.
", @@ -1488,7 +1494,8 @@ "ModifyTargetGroupInput$TargetGroupArn": "The Amazon Resource Name (ARN) of the target group.
", "RegisterTargetsInput$TargetGroupArn": "The Amazon Resource Name (ARN) of the target group.
", "TargetGroup$TargetGroupArn": "The Amazon Resource Name (ARN) of the target group.
", - "TargetGroupArns$member": null + "TargetGroupArns$member": null, + "TargetGroupTuple$TargetGroupArn": "The Amazon Resource Name (ARN) of the target group.
" } }, "TargetGroupArns": { @@ -1528,6 +1535,12 @@ "ModifyTargetGroupAttributesOutput$Attributes": "Information about the attributes.
" } }, + "TargetGroupList": { + "base": null, + "refs": { + "ForwardActionConfig$TargetGroups": "One or more target groups. For Network Load Balancers, you can specify a single target group.
" + } + }, "TargetGroupName": { "base": null, "refs": { @@ -1547,6 +1560,36 @@ "refs": { } }, + "TargetGroupStickinessConfig": { + "base": "Information about the target group stickiness for a rule.
", + "refs": { + "ForwardActionConfig$TargetGroupStickinessConfig": "The target group stickiness for the rule.
" + } + }, + "TargetGroupStickinessDurationSeconds": { + "base": null, + "refs": { + "TargetGroupStickinessConfig$DurationSeconds": "The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
" + } + }, + "TargetGroupStickinessEnabled": { + "base": null, + "refs": { + "TargetGroupStickinessConfig$Enabled": "Indicates whether target group stickiness is enabled.
" + } + }, + "TargetGroupTuple": { + "base": "Information about how traffic will be distributed between multiple target groups in a forward rule.
", + "refs": { + "TargetGroupList$member": null + } + }, + "TargetGroupWeight": { + "base": null, + "refs": { + "TargetGroupTuple$Weight": "The weight. The range is 0 to 999.
" + } + }, "TargetGroups": { "base": null, "refs": { @@ -1643,6 +1686,11 @@ "refs": { } }, + "TooManyUniqueTargetGroupsPerLoadBalancerException": { + "base": "You've reached the limit on the number of unique target groups per load balancer across all listeners. If a target group is used by multiple actions for a load balancer, it is counted as only one use.
", + "refs": { + } + }, "UnsupportedProtocolException": { "base": "The specified protocol is not supported.
", "refs": { diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index 0c689077f09..f3c4c4afdee 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -396,7 +396,8 @@ "type":"structure", "members":{ "ClusterId":{"shape":"XmlStringMaxLen256"}, - "InstanceFleetId":{"shape":"InstanceFleetId"} + "InstanceFleetId":{"shape":"InstanceFleetId"}, + "ClusterArn":{"shape":"ArnType"} } }, "AddInstanceGroupsInput":{ @@ -414,7 +415,8 @@ "type":"structure", "members":{ "JobFlowId":{"shape":"XmlStringMaxLen256"}, - "InstanceGroupIds":{"shape":"InstanceGroupIdsList"} + "InstanceGroupIds":{"shape":"InstanceGroupIdsList"}, + "ClusterArn":{"shape":"ArnType"} } }, "AddJobFlowStepsInput":{ @@ -653,7 +655,8 @@ "CustomAmiId":{"shape":"XmlStringMaxLen256"}, "EbsRootVolumeSize":{"shape":"Integer"}, "RepoUpgradeOnBoot":{"shape":"RepoUpgradeOnBoot"}, - "KerberosAttributes":{"shape":"KerberosAttributes"} + "KerberosAttributes":{"shape":"KerberosAttributes"}, + "ClusterArn":{"shape":"ArnType"} } }, "ClusterId":{"type":"string"}, @@ -707,7 +710,8 @@ "Id":{"shape":"ClusterId"}, "Name":{"shape":"String"}, "Status":{"shape":"ClusterStatus"}, - "NormalizedInstanceHours":{"shape":"Integer"} + "NormalizedInstanceHours":{"shape":"Integer"}, + "ClusterArn":{"shape":"ArnType"} } }, "ClusterSummaryList":{ @@ -1712,7 +1716,8 @@ "members":{ "ClusterId":{"shape":"ClusterId"}, "InstanceGroupId":{"shape":"InstanceGroupId"}, - "AutoScalingPolicy":{"shape":"AutoScalingPolicyDescription"} + "AutoScalingPolicy":{"shape":"AutoScalingPolicyDescription"}, + "ClusterArn":{"shape":"ArnType"} } }, "PutBlockPublicAccessConfigurationInput":{ @@ -1802,7 +1807,8 @@ "RunJobFlowOutput":{ "type":"structure", "members":{ - "JobFlowId":{"shape":"XmlStringMaxLen256"} + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "ClusterArn":{"shape":"ArnType"} } }, "ScaleDownBehavior":{ diff --git 
a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index 1cbb3ee50de..6d5aecc63b8 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -20,7 +20,7 @@ "ListInstanceGroups": "Provides all available details about the instance groups in a cluster.
", "ListInstances": "Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
", "ListSecurityConfigurations": "Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.
", - "ListSteps": "Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request.
", + "ListSteps": "Provides a list of steps for the cluster in reverse order unless you specify stepIds
with the request of filter by StepStates
. You can specify a maximum of ten stepIDs
.
Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.
", "PutAutoScalingPolicy": "Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.
", @@ -29,7 +29,7 @@ "RemoveTags": "Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.
The following example removes the stack tag with value Prod from a cluster:
", "RunJobFlow": "RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps
parameter is set to TRUE
, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.
For additional protection, you can set the JobFlowInstancesConfig TerminationProtected
parameter to TRUE
to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.
A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
For long running clusters, we recommend that you periodically store your results.
The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.
SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection
on a cluster is similar to calling the Amazon EC2 DisableAPITermination
API on all EC2 instances in a cluster.
SetTerminationProtection
is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.
To terminate a cluster that has been locked by setting SetTerminationProtection
to true
, you must first unlock the job flow by a subsequent call to SetTerminationProtection
in which you set the value to false
.
For more information, seeManaging Cluster Termination in the Amazon EMR Management Guide.
", - "SetVisibleToAllUsers": "This member will be deprecated.
Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers
parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.
Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false
, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true
setting when you create a cluster by using the VisibleToAllUsers
parameter with RunJobFlow
.
TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.
The maximum number of clusters allowed is 10. The call to TerminateJobFlows
is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.
The Amazon Resource Name that created or last modified the configuration.
" + "AddInstanceFleetOutput$ClusterArn": "The Amazon Resource Name of the cluster.
", + "AddInstanceGroupsOutput$ClusterArn": "The Amazon Resource Name of the cluster.
", + "BlockPublicAccessConfigurationMetadata$CreatedByArn": "The Amazon Resource Name that created or last modified the configuration.
", + "Cluster$ClusterArn": "The Amazon Resource Name of the cluster.
", + "ClusterSummary$ClusterArn": "The Amazon Resource Name of the cluster.
", + "PutAutoScalingPolicyOutput$ClusterArn": "The Amazon Resource Name of the cluster.
", + "RunJobFlowOutput$ClusterArn": "The Amazon Resource Name of the cluster.
" } }, "AutoScalingPolicy": { @@ -163,15 +169,15 @@ "BlockPublicAccessConfiguration$BlockPublicSecurityGroupRules": "Indicates whether EMR block public access is enabled (true
) or disabled (false
). By default, the value is false
for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true
.
Specifies whether the cluster should terminate after completing all steps.
", "Cluster$TerminationProtected": "Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.
", - "Cluster$VisibleToAllUsers": "This member will be deprecated.
Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true
, all IAM users of that AWS account can view and manage the cluster if they have the proper policy permissions set. If this value is false
, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.
This member will be deprecated.
Specifies whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true
, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false
, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.
Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false
, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true
when you create a cluster by using the VisibleToAllUsers
parameter of the RunJobFlow
action.
Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false
, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true
when you create a cluster by using the VisibleToAllUsers
parameter of the RunJobFlow
action.
Specifies whether the cluster should remain available after completing all steps.
", "JobFlowInstancesConfig$TerminationProtected": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.
", "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "Specifies whether the cluster should remain available after completing all steps.
", "JobFlowInstancesDetail$TerminationProtected": "Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
", - "RunJobFlowInput$VisibleToAllUsers": "This member will be deprecated.
Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true
, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false
, only the IAM user that created the cluster can view and manage it.
A value of true
indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false
indicates that only the IAM user who created the cluster can perform actions.
A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.
", - "SetVisibleToAllUsersInput$VisibleToAllUsers": "This member will be deprecated.
Whether the specified clusters are visible to all IAM users of the AWS account associated with the cluster. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the clusters. If it is set to False, only the IAM user that created a cluster can view and manage it.
" + "SetVisibleToAllUsersInput$VisibleToAllUsers": "A value of true
indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false
indicates that only the IAM user who created the cluster can perform actions.
The number of periods, expressed in seconds using Period
, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1
.
The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1
.
The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify 300
.
An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.
", "Cluster$EbsRootVolumeSize": "The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.
", @@ -1278,7 +1284,7 @@ } }, "SetVisibleToAllUsersInput": { - "base": "This member will be deprecated.
The input to the SetVisibleToAllUsers action.
", + "base": "The input to the SetVisibleToAllUsers action.
", "refs": { } }, @@ -1468,7 +1474,7 @@ "Instance$InstanceGroupId": "The identifier of the instance group to which this instance belongs.
", "InstanceFleetStateChangeReason$Message": "An explanatory message.
", "InstanceGroup$Name": "The name of the instance group.
", - "InstanceGroup$BidPrice": "The maximum Spot price your are willing to pay for EC2 instances.
An optional, nullable field that applies if the MarketType
for the instance group is specified as SPOT
. Specify the maximum spot price in USD. If the value is NULL and SPOT
is specified, the maximum Spot price is set equal to the On-Demand price.
The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
The status change reason description.
", "InstanceStateChangeReason$Message": "The status change reason description.
", "MetricDimension$Key": "The dimension name.
", @@ -1607,10 +1613,10 @@ "refs": { "DescribeJobFlowsInput$JobFlowIds": "Return only job flows whose job flow ID is contained in this list.
", "HadoopJarStepConfig$Args": "A list of command line arguments passed to the JAR file's main function when executed.
", - "ListStepsInput$StepIds": "The filter to limit the step list based on the identifier of the steps.
", + "ListStepsInput$StepIds": "The filter to limit the step list based on the identifier of the steps. You can specify a maximum of ten Step IDs. The character constraint applies to the overall length of the array.
", "ScriptBootstrapActionConfig$Args": "A list of command line arguments to pass to the bootstrap action script.
", "SetTerminationProtectionInput$JobFlowIds": "A list of strings that uniquely identify the clusters to protect. This identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows .
", - "SetVisibleToAllUsersInput$JobFlowIds": "Identifiers of the job flows to receive the new visibility setting.
", + "SetVisibleToAllUsersInput$JobFlowIds": "The unique identifier of the job flow (cluster).
", "SupportedProductConfig$Args": "The list of user-supplied arguments.
", "TerminateJobFlowsInput$JobFlowIds": "A list of job flows to be shutdown.
" } @@ -1629,10 +1635,10 @@ "InstanceFleet$Name": "A friendly name for the instance fleet.
", "InstanceFleetConfig$Name": "The friendly name of the instance fleet.
", "InstanceGroupConfig$Name": "Friendly name given to the instance group.
", - "InstanceGroupConfig$BidPrice": "The maximum Spot price your are willing to pay for EC2 instances.
An optional, nullable field that applies if the MarketType
for the instance group is specified as SPOT
. Specify the maximum spot price in USD. If the value is NULL and SPOT
is specified, the maximum Spot price is set equal to the On-Demand price.
The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Unique identifier for the instance group.
", "InstanceGroupDetail$Name": "Friendly name for the instance group.
", - "InstanceGroupDetail$BidPrice": "The maximum Spot price your are willing to pay for EC2 instances.
An optional, nullable field that applies if the MarketType
for the instance group is specified as SPOT
. Specified in USD. If the value is NULL and SPOT
is specified, the maximum Spot price is set equal to the On-Demand price.
The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Unique ID of the instance group to expand or shrink.
", "InstanceTypeConfig$BidPrice": "The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), Amazon Redshift, and Splunk.
", "operations": { - "CreateDeliveryStream": "Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per AWS Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
", - "DeleteDeliveryStream": "Deletes a delivery stream and its data.
You can delete a delivery stream only if it is in ACTIVE
or DELETING
state, and not in the CREATING
state. While the deletion request is in process, the delivery stream is in the DELETING
state.
To check the state of a delivery stream, use DescribeDeliveryStream.
While the delivery stream is DELETING
state, the service might continue to accept the records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.
Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream
to see whether the delivery stream is ACTIVE
and therefore ready for data to be sent to it.
Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per AWS Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
", + "DeleteDeliveryStream": "Deletes a delivery stream and its data.
To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. While the deletion request is in process, the delivery stream is in the DELETING
state.
While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream
to see whether the delivery stream is ACTIVE
and therefore ready for data to be sent to it.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED
, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.
Lists your delivery streams in alphabetical order of their names.
The number of delivery streams might be too large to return using a single call to ListDeliveryStreams
. You can limit the number of delivery streams returned, using the Limit
parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams
in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName
parameter to the name of the last delivery stream returned in the last call.
Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.
", "PutRecord": "Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecord
operation returns a RecordId
, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.
If the PutRecord
operation throws a ServiceUnavailableException
, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits.
Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before 64-bit encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecordBatch response includes a count of failed records, FailedPutCount
, and an array of responses, RequestResponses
. Even if the PutRecordBatch call succeeds, the value of FailedPutCount
may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses
array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses
includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.
A successfully processed record includes a RecordId
value, which is unique for the record. An unsuccessfully processed record includes ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error, and is one of the following values: ServiceUnavailableException
or InternalFailure
. ErrorMessage
provides more detailed information about the error.
If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount
is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to ENABLING
, and then to ENABLED
. You can continue to read and write data to your stream while its status is ENABLING
, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED
before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
You can only enable SSE for a delivery stream that uses DirectPut
as its source.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to DISABLING
, and then to DISABLED
. You can continue to read and write data to your stream while its status is DISABLING
. It can take up to 5 seconds after the encryption status changes to DISABLED
before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING
, and then to ENABLED
. The encryption status of a delivery stream is the Status
property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED
. You can continue to read and write data to your delivery stream while the encryption status is ENABLING
, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED
before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. In this case, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement and creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.
If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED
, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you can invoke this operation again.
You can only enable SSE for a delivery stream that uses DirectPut
as its source.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING
, and then to DISABLED
. You can continue to read and write data to your stream while its status is DISABLING
. It can take up to 5 seconds after the encryption status changes to DISABLED
before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption
, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.
Each delivery stream can have up to 50 tags.
This operation has a limit of five transactions per second per account.
", "UntagDeliveryStream": "Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.
If you specify a tag that doesn't exist, the operation ignores it.
This operation has a limit of five transactions per second per account.
", "UpdateDestination": "Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.
Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.
If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration
is maintained on the destination.
If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.
Kinesis Data Firehose uses CurrentDeliveryStreamVersionId
to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId
in the next call.
If KeyType
is CUSTOMER_MANAGED_CMK
, this field contains the ARN of the customer managed CMK. If KeyType
is AWS_OWNED_CMK
, DeliveryStreamEncryptionConfiguration
doesn't contain a value for KeyARN
.
If you set KeyType
to CUSTOMER_MANAGED_CMK
, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType
to AWS_OWNED_CMK
, Kinesis Data Firehose uses a service-account CMK.
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
" } }, @@ -34,6 +36,7 @@ "refs": { "CloudWatchLoggingOptions$Enabled": "Enables or disables CloudWatch logging.
", "DataFormatConversionConfiguration$Enabled": "Defaults to true
. Set it to false
if you want to disable format conversion while preserving the configuration details.
Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an AWS KMS issue, Kinesis Data Firehose keeps retrying the delete operation.
The default value is false.
", "DeliveryStreamDescription$HasMoreDestinations": "Indicates whether there are more destinations available to list.
", "ListDeliveryStreamsOutput$HasMoreDeliveryStreams": "Indicates whether there are more delivery streams available to list.
", "ListTagsForDeliveryStreamOutput$HasMoreTags": "If this is true
in the response, more tags are available. To list the remaining tags, set ExclusiveStartTagKey
to the key of the last tag returned and call ListTagsForDeliveryStream
again.
Indicates the server-side encryption (SSE) status for the delivery stream.
", + "base": "Contains information about the server-side encryption (SSE) status for the delivery stream, the type customer master key (CMK) in use, if any, and the ARN of the CMK. You can get DeliveryStreamEncryptionConfiguration
by invoking the DescribeDeliveryStream operation.
Indicates the server-side encryption (SSE) status for the delivery stream.
" } }, + "DeliveryStreamEncryptionConfigurationInput": { + "base": "Used to specify the type and Amazon Resource Name (ARN) of the CMK needed for Server-Side Encryption (SSE).
", + "refs": { + "CreateDeliveryStreamInput$DeliveryStreamEncryptionConfigurationInput": "Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).
", + "StartDeliveryStreamEncryptionInput$DeliveryStreamEncryptionConfigurationInput": "Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).
" + } + }, "DeliveryStreamEncryptionStatus": { "base": null, "refs": { - "DeliveryStreamEncryptionConfiguration$Status": "For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption.
" + "DeliveryStreamEncryptionConfiguration$Status": "This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED
or DISABLING_FAILED
, it is the status of the most recent attempt to enable or disable SSE, respectively.
The type of error that caused the failure.
" } }, "DeliveryStreamName": { @@ -237,7 +253,7 @@ "DeliveryStreamStatus": { "base": null, "refs": { - "DeliveryStreamDescription$DeliveryStreamStatus": "The status of the delivery stream.
" + "DeliveryStreamDescription$DeliveryStreamStatus": "The status of the delivery stream. If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
The error code for an individual record result.
" } }, @@ -418,6 +435,7 @@ "refs": { "ConcurrentModificationException$message": "A message that provides information about the error.
", "InvalidArgumentException$message": "A message that provides information about the error.
", + "InvalidKMSResourceException$message": null, "LimitExceededException$message": "A message that provides information about the error.
", "PutRecordBatchResponseEntry$ErrorMessage": "The error message for an individual record result.
", "ResourceInUseException$message": "A message that provides information about the error.
", @@ -454,6 +472,13 @@ "UpdateDestinationInput$ExtendedS3DestinationUpdate": "Describes an update for a destination in Amazon S3.
" } }, + "FailureDescription": { + "base": "Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
", + "refs": { + "DeliveryStreamDescription$FailureDescription": "Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
", + "DeliveryStreamEncryptionConfiguration$FailureDescription": "Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
" + } + }, "HECAcknowledgmentTimeoutInSeconds": { "base": null, "refs": { @@ -509,12 +534,24 @@ "refs": { } }, + "InvalidKMSResourceException": { + "base": "Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException
, InvalidStateException
, DisabledException
, or NotFoundException
.
Describes an encryption key for a destination in Amazon S3.
", "refs": { "EncryptionConfiguration$KMSEncryptionConfig": "The encryption key.
" } }, + "KeyType": { + "base": null, + "refs": { + "DeliveryStreamEncryptionConfiguration$KeyType": "Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK
. For more information about CMKs, see Customer Master Keys (CMKs).
Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK
. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType
set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.
When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is already encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.
" + } + }, "KinesisStreamARN": { "base": null, "refs": { @@ -611,6 +648,7 @@ "base": null, "refs": { "ColumnToJsonKeyMappings$value": null, + "FailureDescription$Details": "A message providing details about the error that caused the failure.
", "ListOfNonEmptyStrings$member": null } }, @@ -679,7 +717,7 @@ "ParquetCompression": { "base": null, "refs": { - "ParquetSerDe$Compression": "The compression code to use over data blocks. The possible values are UNCOMPRESSED
, SNAPPY
, and GZIP
, with the default being SNAPPY
. Use SNAPPY
for higher decompression speed. Use GZIP
if the compression ration is more important than speed.
The compression code to use over data blocks. The possible values are UNCOMPRESSED
, SNAPPY
, and GZIP
, with the default being SNAPPY
. Use SNAPPY
for higher decompression speed. Use GZIP
if the compression ratio is more important than speed.
The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.
", "refs": { - "FileSystem$DNSName": "The DNS name for the file system.
" + "FileSystem$DNSName": "The DNS name for the file system.
", + "WindowsFileSystemConfiguration$RemoteAdministrationEndpoint": "For MULTI_AZ_1
deployment types, use this endpoint when performing administrative tasks on the file system using Amazon FSx Remote PowerShell.
For SINGLE_AZ_1
deployment types, this is the DNS name of the file system.
This endpoint is temporarily unavailable when the file system is undergoing maintenance.
" } }, "DailyTime": { @@ -335,7 +336,7 @@ "base": null, "refs": { "SelfManagedActiveDirectoryAttributes$FileSystemAdministratorsGroup": "The name of the domain group whose members have administrative privileges for the FSx file system.
", - "SelfManagedActiveDirectoryConfiguration$FileSystemAdministratorsGroup": "(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.
" + "SelfManagedActiveDirectoryConfiguration$FileSystemAdministratorsGroup": "(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.
" } }, "FileSystemFailureDetails": { @@ -366,7 +367,7 @@ "base": "The lifecycle status of the file system.
", "refs": { "DeleteFileSystemResponse$Lifecycle": "The file system lifecycle for the deletion request. Should be DELETING
.
The lifecycle status of the file system:
AVAILABLE
indicates that the file system is reachable and available for use.
CREATING
indicates that Amazon FSx is in the process of creating the new file system.
DELETING
indicates that Amazon FSx is in the process of deleting the file system.
FAILED
indicates that Amazon FSx was not able to create the file system.
MISCONFIGURED
indicates that the file system is in a failed but recoverable state.
UPDATING
indicates that the file system is undergoing a customer initiated update.
The lifecycle status of the file system, following are the possible values and what they mean:
AVAILABLE
- The file system is in a healthy state, and is reachable and available for use.
CREATING
- Amazon FSx is creating the new file system.
DELETING
- Amazon FSx is deleting an existing file system.
FAILED
- An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.
MISCONFIGURED
indicates that the file system is in a failed but recoverable state.
UPDATING
indicates that the file system is undergoing a customer initiated update.
For MULTI_AZ_1
deployment types, the IP address of the primary, or preferred, file server.
Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that are not joined to a Microsoft Active Directory. Applicable for both SINGLE_AZ_1
and MULTI_AZ_1
deployment types. This IP address is temporarily unavailable when the file system is undergoing maintenance. For Linux and Windows SMB clients that are joined to an Active Directory, use the file system's DNSName instead. For more information and instruction on mapping and mounting file shares, see https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html.
The storage capacity for your Amazon FSx file system, in gibibytes.
", "refs": { - "CreateFileSystemRequest$StorageCapacity": "The storage capacity of the file system being created.
For Windows file systems, the storage capacity has a minimum of 300 GiB, and a maximum of 65,536 GiB.
For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. Storage capacity is provisioned in increments of 3,600 GiB.
", + "CreateFileSystemRequest$StorageCapacity": "The storage capacity of the file system being created.
For Windows file systems, valid values are 32 GiB - 65,536 GiB.
For Lustre file systems, valid values are 1,200, 2,400, 3,600, then continuing in increments of 3,600 GiB.
", "FileSystem$StorageCapacity": "The storage capacity of the file system in gigabytes (GB).
" } }, "SubnetId": { "base": "The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.
", "refs": { + "CreateFileSystemWindowsConfiguration$PreferredSubnetId": "Required when DeploymentType
is set to MULTI_AZ_1
. This specifies the subnet in which you want the preferred file server to be located. For in-AWS applications, we recommend that you launch your clients in the same Availability Zone (AZ) as your preferred file server to reduce cross-AZ data transfer costs and minimize latency.
For MULTI_AZ_1
deployment types, it specifies the ID of the subnet where the preferred file server is located. Must be one of the two subnet IDs specified in SubnetIds
property. Amazon FSx serves traffic from this subnet except in the event of a failover to the secondary file server.
For SINGLE_AZ_1
deployment types, this value is the same as that for SubnetIDs
.
A list of subnet IDs. Currently, you can specify only one subnet ID in a call to the CreateFileSystem
operation.
A list of IDs for the subnets that the file system will be accessible from. Currently, you can specify only one subnet. The file server is also launched in that subnet's Availability Zone.
", - "CreateFileSystemRequest$SubnetIds": "The IDs of the subnets that the file system will be accessible from. File systems support only one subnet. The file server is also launched in that subnet's Availability Zone.
", + "CreateFileSystemRequest$SubnetIds": "Specifies the IDs of the subnets that the file system will be accessible from. For Windows MULTI_AZ_1
file system deployment types, provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID
property.
For Windows SINGLE_AZ_1
file system deployment types and Lustre file systems, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.
The ID of the subnet to contain the endpoint for the file system. One and only one is supported. The file system is launched in the Availability Zone associated with this subnet.
" } }, @@ -757,6 +761,13 @@ "WindowsFileSystemConfiguration$WeeklyMaintenanceStartTime": "The preferred time to perform weekly maintenance, in the UTC time zone.
" } }, + "WindowsDeploymentType": { + "base": null, + "refs": { + "CreateFileSystemWindowsConfiguration$DeploymentType": "Specifies the file system deployment type, valid values are the following:
MULTI_AZ_1 - Deploys a high availability file system that is configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. You can only deploy a Multi-AZ file system in AWS Regions that have a minimum of three Availability Zones.
SINGLE_AZ_1 - (Default) Choose to deploy a file system that is configured for single AZ redundancy.
To learn more about high availability Multi-AZ file systems, see High Availability for Amazon FSx for Windows File Server.
", + "WindowsFileSystemConfiguration$DeploymentType": "Specifies the file system deployment type, valid values are the following:
MULTI_AZ_1
- Specifies a high availability file system that is configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.
SINGLE_AZ_1
- (Default) Specifies a file system that is configured for single AZ redundancy.
The configuration for this Microsoft Windows file system.
", "refs": { diff --git a/models/apis/guardduty/2017-11-28/api-2.json b/models/apis/guardduty/2017-11-28/api-2.json index cb415437b3e..871c59b60c7 100644 --- a/models/apis/guardduty/2017-11-28/api-2.json +++ b/models/apis/guardduty/2017-11-28/api-2.json @@ -96,6 +96,20 @@ {"shape":"InternalServerErrorException"} ] }, + "CreatePublishingDestination":{ + "name":"CreatePublishingDestination", + "http":{ + "method":"POST", + "requestUri":"/detector/{detectorId}/publishingDestination", + "responseCode":200 + }, + "input":{"shape":"CreatePublishingDestinationRequest"}, + "output":{"shape":"CreatePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "CreateSampleFindings":{ "name":"CreateSampleFindings", "http":{ @@ -208,6 +222,20 @@ {"shape":"InternalServerErrorException"} ] }, + "DeletePublishingDestination":{ + "name":"DeletePublishingDestination", + "http":{ + "method":"DELETE", + "requestUri":"/detector/{detectorId}/publishingDestination/{destinationId}", + "responseCode":200 + }, + "input":{"shape":"DeletePublishingDestinationRequest"}, + "output":{"shape":"DeletePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "DeleteThreatIntelSet":{ "name":"DeleteThreatIntelSet", "http":{ @@ -222,6 +250,20 @@ {"shape":"InternalServerErrorException"} ] }, + "DescribePublishingDestination":{ + "name":"DescribePublishingDestination", + "http":{ + "method":"GET", + "requestUri":"/detector/{detectorId}/publishingDestination/{destinationId}", + "responseCode":200 + }, + "input":{"shape":"DescribePublishingDestinationRequest"}, + "output":{"shape":"DescribePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "DisassociateFromMasterAccount":{ "name":"DisassociateFromMasterAccount", "http":{ @@ -474,6 +516,20 @@ 
{"shape":"InternalServerErrorException"} ] }, + "ListPublishingDestinations":{ + "name":"ListPublishingDestinations", + "http":{ + "method":"GET", + "requestUri":"/detector/{detectorId}/publishingDestination", + "responseCode":200 + }, + "input":{"shape":"ListPublishingDestinationsRequest"}, + "output":{"shape":"ListPublishingDestinationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -628,6 +684,20 @@ {"shape":"InternalServerErrorException"} ] }, + "UpdatePublishingDestination":{ + "name":"UpdatePublishingDestination", + "http":{ + "method":"POST", + "requestUri":"/detector/{detectorId}/publishingDestination/{destinationId}", + "responseCode":200 + }, + "input":{"shape":"UpdatePublishingDestinationRequest"}, + "output":{"shape":"UpdatePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "UpdateThreatIntelSet":{ "name":"UpdateThreatIntelSet", "http":{ @@ -1073,6 +1143,44 @@ } } }, + "CreatePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationType", + "DestinationProperties" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationType":{ + "shape":"DestinationType", + "locationName":"destinationType" + }, + "DestinationProperties":{ + "shape":"DestinationProperties", + "locationName":"destinationProperties" + }, + "ClientToken":{ + "shape":"ClientToken", + "idempotencyToken":true, + "locationName":"clientToken" + } + } + }, + "CreatePublishingDestinationResponse":{ + "type":"structure", + "required":["DestinationId"], + "members":{ + "DestinationId":{ + "shape":"String", + "locationName":"destinationId" + } + } + }, "CreateSampleFindingsRequest":{ "type":"structure", "required":["DetectorId"], @@ -1282,6 +1390,30 @@ } } }, + 
"DeletePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationId" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationId":{ + "shape":"String", + "location":"uri", + "locationName":"destinationId" + } + } + }, + "DeletePublishingDestinationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteThreatIntelSetRequest":{ "type":"structure", "required":[ @@ -1306,6 +1438,102 @@ "members":{ } }, + "DescribePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationId" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationId":{ + "shape":"String", + "location":"uri", + "locationName":"destinationId" + } + } + }, + "DescribePublishingDestinationResponse":{ + "type":"structure", + "required":[ + "DestinationId", + "DestinationType", + "Status", + "PublishingFailureStartTimestamp", + "DestinationProperties" + ], + "members":{ + "DestinationId":{ + "shape":"String", + "locationName":"destinationId" + }, + "DestinationType":{ + "shape":"DestinationType", + "locationName":"destinationType" + }, + "Status":{ + "shape":"PublishingStatus", + "locationName":"status" + }, + "PublishingFailureStartTimestamp":{ + "shape":"Long", + "locationName":"publishingFailureStartTimestamp" + }, + "DestinationProperties":{ + "shape":"DestinationProperties", + "locationName":"destinationProperties" + } + } + }, + "Destination":{ + "type":"structure", + "required":[ + "DestinationId", + "DestinationType", + "Status" + ], + "members":{ + "DestinationId":{ + "shape":"String", + "locationName":"destinationId" + }, + "DestinationType":{ + "shape":"DestinationType", + "locationName":"destinationType" + }, + "Status":{ + "shape":"PublishingStatus", + "locationName":"status" + } + } + }, + "DestinationProperties":{ + "type":"structure", + "members":{ + 
"DestinationArn":{ + "shape":"String", + "locationName":"destinationArn" + }, + "KmsKeyArn":{ + "shape":"String", + "locationName":"kmsKeyArn" + } + } + }, + "DestinationType":{ + "type":"string", + "enum":["S3"], + "max":300, + "min":1 + }, + "Destinations":{ + "type":"list", + "member":{"shape":"Destination"} + }, "DetectorId":{ "type":"string", "max":300, @@ -2326,6 +2554,41 @@ } } }, + "ListPublishingDestinationsRequest":{ + "type":"structure", + "required":["DetectorId"], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListPublishingDestinationsResponse":{ + "type":"structure", + "required":["Destinations"], + "members":{ + "Destinations":{ + "shape":"Destinations", + "locationName":"destinations" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2655,6 +2918,17 @@ "type":"list", "member":{"shape":"ProductCode"} }, + "PublishingStatus":{ + "type":"string", + "enum":[ + "PENDING_VERIFICATION", + "PUBLISHING", + "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY", + "STOPPED" + ], + "max":300, + "min":1 + }, "RemoteIpDetails":{ "type":"structure", "members":{ @@ -3166,6 +3440,34 @@ "members":{ } }, + "UpdatePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationId" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationId":{ + "shape":"String", + "location":"uri", + "locationName":"destinationId" + }, + "DestinationProperties":{ + "shape":"DestinationProperties", + "locationName":"destinationProperties" + } + } + }, + "UpdatePublishingDestinationResponse":{ + 
"type":"structure", + "members":{ + } + }, "UpdateThreatIntelSetRequest":{ "type":"structure", "required":[ diff --git a/models/apis/guardduty/2017-11-28/docs-2.json b/models/apis/guardduty/2017-11-28/docs-2.json index 644fa23771b..c9edd87f752 100644 --- a/models/apis/guardduty/2017-11-28/docs-2.json +++ b/models/apis/guardduty/2017-11-28/docs-2.json @@ -1,29 +1,32 @@ { "version": "2.0", - "service": "Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious IPs and domains, and machine learning to identify unexpected and potentially unauthorized and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a region that has never been used, or unusual API calls, like a password policy change to reduce password strength. GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see Amazon GuardDuty User Guide.
", + "service": "Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious IPs and domains, and machine learning to identify unexpected and potentially unauthorized and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a region that has never been used, or unusual API calls, like a password policy change to reduce password strength. GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see Amazon GuardDuty User Guide.
", "operations": { "AcceptInvitation": "Accepts the invitation to be monitored by a master GuardDuty account.
", "ArchiveFindings": "Archives GuardDuty findings specified by the list of finding IDs.
Only the master account can archive findings. Member accounts do not have permission to archive findings from their accounts.
Creates a single Amazon GuardDuty detector. A detector is a resource that represents the GuardDuty service. To start using GuardDuty, you must create a detector in each region that you enable the service. You can have only one detector per account per region.
", "CreateFilter": "Creates a filter using the specified finding criteria.
", - "CreateIPSet": "Creates a new IPSet - a list of trusted IP addresses that have been whitelisted for secure communication with AWS infrastructure and applications.
", + "CreateIPSet": "Creates a new IPSet, called Trusted IP list in the consoler user interface. An IPSet is a list IP addresses trusted for secure communication with AWS infrastructure and applications. GuardDuty does not generate findings for IP addresses included in IPSets. Only users from the master account can use this operation.
", "CreateMembers": "Creates member accounts of the current AWS account by specifying a list of AWS account IDs. The current AWS account can then invite these members to manage GuardDuty in their accounts.
", - "CreateSampleFindings": "Generates example findings of types specified by the list of finding types. If 'NULL' is specified for findingTypes, the API generates example findings of all supported finding types.
", - "CreateThreatIntelSet": "Create a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets.
", + "CreatePublishingDestination": "Creates a publishing destination to send findings to. The resource to send findings to must exist before you use this operation.
", + "CreateSampleFindings": "Generates example findings of types specified by the list of finding types. If 'NULL' is specified for findingTypes
, the API generates example findings of all supported finding types.
Create a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets. Only users of the master account can use this operation.
", "DeclineInvitations": "Declines invitations sent to the current member account by AWS account specified by their account IDs.
", "DeleteDetector": "Deletes a Amazon GuardDuty detector specified by the detector ID.
", "DeleteFilter": "Deletes the filter specified by the filter name.
", - "DeleteIPSet": "Deletes the IPSet specified by the IPSet ID.
", + "DeleteIPSet": "Deletes the IPSet specified by the ipSetId
. IPSets are called Trusted IP lists in the console user interface.
Deletes invitations sent to the current member account by AWS accounts specified by their account IDs.
", "DeleteMembers": "Deletes GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.
", + "DeletePublishingDestination": "Deletes the publishing definition with the specified destinationId
.
Deletes ThreatIntelSet specified by the ThreatIntelSet ID.
", + "DescribePublishingDestination": "Returns information about the publishing destination specified by the provided destinationId
.
Disassociates the current GuardDuty member account from its master account.
", "DisassociateMembers": "Disassociates GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.
", "GetDetector": "Retrieves an Amazon GuardDuty detector specified by the detectorId.
", "GetFilter": "Returns the details of the filter specified by the filter name.
", "GetFindings": "Describes Amazon GuardDuty findings specified by finding IDs.
", "GetFindingsStatistics": "Lists Amazon GuardDuty findings' statistics for the specified detector ID.
", - "GetIPSet": "Retrieves the IPSet specified by the IPSet ID.
", + "GetIPSet": "Retrieves the IPSet specified by the ipSetId
.
Returns the count of all GuardDuty membership invitations that were sent to the current member account except the currently accepted invitation.
", "GetMasterAccount": "Provides the details for the GuardDuty master account associated with the current GuardDuty member account.
", "GetMembers": "Retrieves GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.
", @@ -32,20 +35,22 @@ "ListDetectors": "Lists detectorIds of all the existing Amazon GuardDuty detector resources.
", "ListFilters": "Returns a paginated list of the current filters.
", "ListFindings": "Lists Amazon GuardDuty findings for the specified detector ID.
", - "ListIPSets": "Lists the IPSets of the GuardDuty service specified by the detector ID.
", + "ListIPSets": "Lists the IPSets of the GuardDuty service specified by the detector ID. If you use this operation from a member account, the IPSets returned are the IPSets from the associated master account.
", "ListInvitations": "Lists all GuardDuty membership invitations that were sent to the current AWS account.
", "ListMembers": "Lists details about all member accounts for the current GuardDuty master account.
", + "ListPublishingDestinations": "Returns a list of publishing destinations associated with the specified dectectorId
.
Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.
", - "ListThreatIntelSets": "Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID.
", - "StartMonitoringMembers": "Re-enables GuardDuty to monitor findings of the member accounts specified by the account IDs. A master GuardDuty account can run this command after disabling GuardDuty from monitoring these members' findings by running StopMonitoringMembers.
", - "StopMonitoringMembers": "Disables GuardDuty from monitoring findings of the member accounts specified by the account IDs. After running this command, a master GuardDuty account can run StartMonitoringMembers to re-enable GuardDuty to monitor these members’ findings.
", + "ListThreatIntelSets": "Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID. If you use this operation from a member account, the ThreatIntelSets associated with the master account are returned.
", + "StartMonitoringMembers": "Turns on GuardDuty monitoring of the specified member accounts. Use this operation to restart monitoring of accounts that you stopped monitoring with the StopMonitoringMembers
operation.
Stops GuardDuty monitoring for the specified member accounts. Use the StartMonitoringMembers
to restart monitoring for those accounts.
Adds tags to a resource.
", - "UnarchiveFindings": "Unarchives Amazon GuardDuty findings specified by the list of finding IDs.
", + "UnarchiveFindings": "Unarchives GuardDuty findings specified by the findingIds
.
Removes tags from a resource.
", - "UpdateDetector": "Updates an Amazon GuardDuty detector specified by the detectorId.
", + "UpdateDetector": "Updates the Amazon GuardDuty detector specified by the detectorId.
", "UpdateFilter": "Updates the filter specified by the filter name.
", - "UpdateFindingsFeedback": "Marks specified Amazon GuardDuty findings as useful or not useful.
", + "UpdateFindingsFeedback": "Marks the specified GuardDuty findings as useful or not useful.
", "UpdateIPSet": "Updates the IPSet specified by the IPSet ID.
", + "UpdatePublishingDestination": "Updates information about the publishing destination specified by the destinationId
.
Updates the ThreatIntelSet specified by ThreatIntelSet ID.
" }, "shapes": { @@ -82,7 +87,7 @@ "refs": { "AccountDetail$AccountId": "Member account ID.
", "AccountIds$member": null, - "Invitation$AccountId": "Inviter account ID
", + "Invitation$AccountId": "The ID of the account from which the invitations was sent.
", "Master$AccountId": "The ID of the account used as the Master account.
", "Member$AccountId": "Member account ID.
", "UnprocessedAccount$AccountId": "AWS Account ID.
" @@ -97,7 +102,7 @@ "DisassociateMembersRequest$AccountIds": "A list of account IDs of the GuardDuty member accounts that you want to disassociate from master.
", "GetMembersRequest$AccountIds": "A list of account IDs of the GuardDuty member accounts that you want to describe.
", "InviteMembersRequest$AccountIds": "A list of account IDs of the accounts that you want to invite to GuardDuty as members.
", - "StartMonitoringMembersRequest$AccountIds": "A list of account IDs of the GuardDuty member accounts whose findings you want the master account to monitor.
", + "StartMonitoringMembersRequest$AccountIds": "A list of account IDs of the GuardDuty member accounts to start monitoring.
", "StopMonitoringMembersRequest$AccountIds": "A list of account IDs of the GuardDuty member accounts whose findings you want the master account to stop monitoring.
" } }, @@ -138,7 +143,7 @@ "NetworkConnectionAction$Blocked": "Network connection blocked information.
", "PortProbeAction$Blocked": "Port probe blocked information.
", "Service$Archived": "Indicates whether this finding is archived.
", - "UpdateDetectorRequest$Enable": "Updated boolean value for the detector that specifies whether the detector is enabled.
", + "UpdateDetectorRequest$Enable": "Specifies whether the detector is enabled or not enabled.
", "UpdateIPSetRequest$Activate": "The updated boolean value that specifies whether the IPSet is active or not.
", "UpdateThreatIntelSetRequest$Activate": "The updated boolean value that specifies whether the ThreatIntelSet is active or not.
" } @@ -155,6 +160,7 @@ "CreateDetectorRequest$ClientToken": "The idempotency token for the create request.
", "CreateFilterRequest$ClientToken": "The idempotency token for the create request.
", "CreateIPSetRequest$ClientToken": "The idempotency token for the create request.
", + "CreatePublishingDestinationRequest$ClientToken": "The idempotency token for the request.
", "CreateThreatIntelSetRequest$ClientToken": "The idempotency token for the create request.
" } }, @@ -171,7 +177,7 @@ } }, "Country": { - "base": "Contains information about the country.
", + "base": "Contains information about the country in which the remote IP address is located.
", "refs": { "RemoteIpDetails$Country": "Country code of the remote IP address.
" } @@ -216,6 +222,16 @@ "refs": { } }, + "CreatePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "CreatePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, "CreateSampleFindingsRequest": { "base": null, "refs": { @@ -302,6 +318,16 @@ "refs": { } }, + "DeletePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "DeletePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, "DeleteThreatIntelSetRequest": { "base": null, "refs": { @@ -312,6 +338,44 @@ "refs": { } }, + "DescribePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "DescribePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, + "Destination": { + "base": "Contains information about a publishing destination, including the ID, type, and status.
", + "refs": { + "Destinations$member": null + } + }, + "DestinationProperties": { + "base": "Contains the ARN of the resource to publish to, such as an S3 bucket, and the ARN of the KMS key to use to encrypt published findings.
", + "refs": { + "CreatePublishingDestinationRequest$DestinationProperties": "Properties of the publishing destination, including the ARNs for the destination and the KMS key used for encryption.
", + "DescribePublishingDestinationResponse$DestinationProperties": "A DestinationProperties
object that includes the DestinationArn
and KmsKeyArn
of the publishing destination.
A DestinationProperties
object that includes the DestinationArn
and KmsKeyArn
of the publishing destination.
The type of resource for the publishing destination. Currently only S3 is supported.
", + "DescribePublishingDestinationResponse$DestinationType": "The type of the publishing destination. Currently, only S3 is supported.
", + "Destination$DestinationType": "The type of resource used for the publishing destination. Currently, only S3 is supported.
" + } + }, + "Destinations": { + "base": null, + "refs": { + "ListPublishingDestinationsResponse$Destinations": "A Destinations
obect that includes information about each publishing destination returned.
The unique ID of the detector of the GuardDuty account for which you want to create a filter.
", "CreateIPSetRequest$DetectorId": "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.
", "CreateMembersRequest$DetectorId": "The unique ID of the detector of the GuardDuty account with which you want to associate member accounts.
", + "CreatePublishingDestinationRequest$DetectorId": "The ID of the GuardDuty detector associated with the publishing destination.
", "CreateSampleFindingsRequest$DetectorId": "The ID of the detector to create sample findings for.
", "CreateThreatIntelSetRequest$DetectorId": "The unique ID of the detector of the GuardDuty account for which you want to create a threatIntelSet.
", "DeleteDetectorRequest$DetectorId": "The unique ID of the detector that you want to delete.
", "DeleteFilterRequest$DetectorId": "The unique ID of the detector the filter is associated with.
", - "DeleteIPSetRequest$DetectorId": "The unique ID of the detector the ipSet is associated with.
", + "DeleteIPSetRequest$DetectorId": "The unique ID of the detector associated with the IPSet.
", "DeleteMembersRequest$DetectorId": "The unique ID of the detector of the GuardDuty account whose members you want to delete.
", + "DeletePublishingDestinationRequest$DetectorId": "The unique ID of the detector associated with the publishing destination to delete.
", "DeleteThreatIntelSetRequest$DetectorId": "The unique ID of the detector the threatIntelSet is associated with.
", + "DescribePublishingDestinationRequest$DetectorId": "The unique ID of the detector associated with the publishing destination to retrieve.
", "DetectorIds$member": null, "DisassociateFromMasterAccountRequest$DetectorId": "The unique ID of the detector of the GuardDuty member account.
", "DisassociateMembersRequest$DetectorId": "The unique ID of the detector of the GuardDuty account whose members you want to disassociate from master.
", @@ -344,16 +411,18 @@ "ListFindingsRequest$DetectorId": "The ID of the detector that specifies the GuardDuty service whose findings you want to list.
", "ListIPSetsRequest$DetectorId": "The unique ID of the detector the ipSet is associated with.
", "ListMembersRequest$DetectorId": "The unique ID of the detector the member is associated with.
", + "ListPublishingDestinationsRequest$DetectorId": "The ID of the detector to retrieve publishing destinations for.
", "ListThreatIntelSetsRequest$DetectorId": "The unique ID of the detector the threatIntelSet is associated with.
", "Member$DetectorId": "Member account's detector ID.
", "Service$DetectorId": "Detector ID for the GuardDuty service.
", - "StartMonitoringMembersRequest$DetectorId": "The unique ID of the detector of the GuardDuty account whom you want to re-enable to monitor members' findings.
", + "StartMonitoringMembersRequest$DetectorId": "The unique ID of the detector of the GuardDuty master account associated with the member accounts to monitor.
", "StopMonitoringMembersRequest$DetectorId": "The unique ID of the detector of the GuardDuty account that you want to stop from monitor members' findings.
", - "UnarchiveFindingsRequest$DetectorId": "The ID of the detector that specifies the GuardDuty service whose findings you want to unarchive.
", - "UpdateDetectorRequest$DetectorId": "The unique ID of the detector that you want to update.
", + "UnarchiveFindingsRequest$DetectorId": "The ID of the detector associated with the findings to unarchive.
", + "UpdateDetectorRequest$DetectorId": "The unique ID of the detector to update.
", "UpdateFilterRequest$DetectorId": "The unique ID of the detector that specifies the GuardDuty service where you want to update a filter.
", - "UpdateFindingsFeedbackRequest$DetectorId": "The ID of the detector that specifies the GuardDuty service whose findings you want to mark as useful or not useful.
", + "UpdateFindingsFeedbackRequest$DetectorId": "The ID of the detector associated with the findings to update feedback for.
", "UpdateIPSetRequest$DetectorId": "The detectorID that specifies the GuardDuty service whose IPSet you want to update.
", + "UpdatePublishingDestinationRequest$DetectorId": "The ID of the
", "UpdateThreatIntelSetRequest$DetectorId": "The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update.
" } }, @@ -390,7 +459,7 @@ } }, "DnsRequestAction": { - "base": "Contains information about the DNS request.
", + "base": "Contains information about the DNS_REQUEST action described in this finding.
", "refs": { "Action$DnsRequestAction": "Information about the DNS_REQUEST action described in this finding.
" } @@ -420,7 +489,7 @@ "Eq": { "base": null, "refs": { - "Condition$Eq": "Deprecated. Represents the equal condition to be applied to a single field when querying for findings.
" + "Condition$Eq": "Represents the equal condition to be applied to a single field when querying for findings.
" } }, "Equals": { @@ -438,7 +507,7 @@ "Feedback": { "base": null, "refs": { - "UpdateFindingsFeedbackRequest$Feedback": "Valid values: USEFUL | NOT_USEFUL
" + "UpdateFindingsFeedbackRequest$Feedback": "The feedback for the finding.
" } }, "FilterAction": { @@ -482,18 +551,18 @@ } }, "Finding": { - "base": "Contains information about the finding.
", + "base": "Contains information about the finding, which is generated when abnormal or suspicious activity is detected.
", "refs": { "Findings$member": null } }, "FindingCriteria": { - "base": "Contains finding criteria information.
", + "base": "Contains information about the criteria used for querying findings.
", "refs": { "CreateFilterRequest$FindingCriteria": "Represents the criteria to be used in the filter for querying findings.
", "GetFilterResponse$FindingCriteria": "Represents the criteria to be used in the filter for querying findings.
", "GetFindingsStatisticsRequest$FindingCriteria": "Represents the criteria used for querying findings.
", - "ListFindingsRequest$FindingCriteria": "Represents the criteria used for querying findings.
", + "ListFindingsRequest$FindingCriteria": "Represents the criteria used for querying findings. Valid values include:
JSON field name
accountId
region
confidence
id
resource.accessKeyDetails.accessKeyId
resource.accessKeyDetails.principalId
resource.accessKeyDetails.userName
resource.accessKeyDetails.userType
resource.instanceDetails.iamInstanceProfile.id
resource.instanceDetails.imageId
resource.instanceDetails.instanceId
resource.instanceDetails.networkInterfaces.ipv6Addresses
resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress
resource.instanceDetails.networkInterfaces.publicDnsName
resource.instanceDetails.networkInterfaces.publicIp
resource.instanceDetails.networkInterfaces.securityGroups.groupId
resource.instanceDetails.networkInterfaces.securityGroups.groupName
resource.instanceDetails.networkInterfaces.subnetId
resource.instanceDetails.networkInterfaces.vpcId
resource.instanceDetails.tags.key
resource.instanceDetails.tags.value
resource.resourceType
service.action.actionType
service.action.awsApiCallAction.api
service.action.awsApiCallAction.callerType
service.action.awsApiCallAction.remoteIpDetails.city.cityName
service.action.awsApiCallAction.remoteIpDetails.country.countryName
service.action.awsApiCallAction.remoteIpDetails.ipAddressV4
service.action.awsApiCallAction.remoteIpDetails.organization.asn
service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg
service.action.awsApiCallAction.serviceName
service.action.dnsRequestAction.domain
service.action.networkConnectionAction.blocked
service.action.networkConnectionAction.connectionDirection
service.action.networkConnectionAction.localPortDetails.port
service.action.networkConnectionAction.protocol
service.action.networkConnectionAction.remoteIpDetails.city.cityName
service.action.networkConnectionAction.remoteIpDetails.country.countryName
service.action.networkConnectionAction.remoteIpDetails.ipAddressV4
service.action.networkConnectionAction.remoteIpDetails.organization.asn
service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg
service.action.networkConnectionAction.remotePortDetails.port
service.additionalInfo.threatListName
service.archived
When this attribute is set to 'true', only archived findings are listed. When it's set to 'false', only unarchived findings are listed. When this attribute is not set, all existing findings are listed.
service.resourceRole
severity
type
updatedAt
Type: Timestamp in Unix Epoch millisecond format: 1486685375000
Represents the criteria to be used in the filter for querying findings.
" } }, @@ -509,7 +578,7 @@ "ArchiveFindingsRequest$FindingIds": "IDs of the findings that you want to archive.
", "GetFindingsRequest$FindingIds": "IDs of the findings that you want to retrieve.
", "ListFindingsResponse$FindingIds": "The IDs of the findings you are listing.
", - "UnarchiveFindingsRequest$FindingIds": "IDs of the findings that you want to unarchive.
", + "UnarchiveFindingsRequest$FindingIds": "IDs of the findings to unarchive.
", "UpdateFindingsFeedbackRequest$FindingIds": "IDs of the findings that you want to mark as useful or not useful.
" } }, @@ -518,7 +587,7 @@ "refs": { "CreateDetectorRequest$FindingPublishingFrequency": "A enum value that specifies how frequently customer got Finding updates published.
", "GetDetectorResponse$FindingPublishingFrequency": "Finding publishing frequency.
", - "UpdateDetectorRequest$FindingPublishingFrequency": "A enum value that specifies how frequently customer got Finding updates published.
" + "UpdateDetectorRequest$FindingPublishingFrequency": "A enum value that specifies how frequently findings are exported, such as to CloudWatch Events.
" } }, "FindingStatisticType": { @@ -549,7 +618,7 @@ "FindingTypes": { "base": null, "refs": { - "CreateSampleFindingsRequest$FindingTypes": "Types of sample findings that you want to generate.
" + "CreateSampleFindingsRequest$FindingTypes": "Types of sample findings to generate.
" } }, "Findings": { @@ -559,7 +628,7 @@ } }, "GeoLocation": { - "base": "Contains information about the
", + "base": "Contains information about the location of the remote IP address.
", "refs": { "RemoteIpDetails$GeoLocation": "Location information of the remote IP address.
" } @@ -658,12 +727,12 @@ "base": null, "refs": { "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) for the given GuardDuty resource
", - "TagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) for the given GuardDuty resource
", - "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) for the given GuardDuty resource
" + "TagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) for the GuardDuty resource to apply a tag to.
", + "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) for the resource to remove tags from.
" } }, "IamInstanceProfile": { - "base": "Contains information about the instance profile.
", + "base": "Contains information about the EC2 instance profile.
", "refs": { "InstanceDetails$IamInstanceProfile": "The profile information of the EC2 instance.
" } @@ -677,10 +746,10 @@ "Integer": { "base": null, "refs": { - "Condition$Gt": "Deprecated. Represents a greater than condition to be applied to a single field when querying for findings.
", - "Condition$Gte": "Deprecated. Represents a greater than equal condition to be applied to a single field when querying for findings.
", - "Condition$Lt": "Deprecated. Represents a less than condition to be applied to a single field when querying for findings.
", - "Condition$Lte": "Deprecated. Represents a less than equal condition to be applied to a single field when querying for findings.
", + "Condition$Gt": "Represents a greater than condition to be applied to a single field when querying for findings.
", + "Condition$Gte": "Represents a greater than equal condition to be applied to a single field when querying for findings.
", + "Condition$Lt": "Represents a less than condition to be applied to a single field when querying for findings.
", + "Condition$Lte": "Represents a less than equal condition to be applied to a single field when querying for findings.
", "CountBySeverity$value": null, "GetInvitationsCountResponse$InvitationsCount": "The number of received invitations.
", "LocalPortDetails$Port": "Port number of the local connection.
", @@ -694,7 +763,7 @@ } }, "Invitation": { - "base": "Contains information about the invitation.
", + "base": "Contains information about the invitation to become a member account.
", "refs": { "Invitations$member": null } @@ -800,6 +869,16 @@ "refs": { } }, + "ListPublishingDestinationsRequest": { + "base": null, + "refs": { + } + }, + "ListPublishingDestinationsResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -844,7 +923,8 @@ "Condition$GreaterThan": "Represents a greater than condition to be applied to a single field when querying for findings.
", "Condition$GreaterThanOrEqual": "Represents a greater than equal condition to be applied to a single field when querying for findings.
", "Condition$LessThan": "Represents a less than condition to be applied to a single field when querying for findings.
", - "Condition$LessThanOrEqual": "Represents a less than equal condition to be applied to a single field when querying for findings.
" + "Condition$LessThanOrEqual": "Represents a less than equal condition to be applied to a single field when querying for findings.
", + "DescribePublishingDestinationResponse$PublishingFailureStartTimestamp": "The time, in epoch millisecond format, at which GuardDuty was first unable to publish findings to the destination.
" } }, "Master": { @@ -862,6 +942,7 @@ "ListIPSetsRequest$MaxResults": "You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.
", "ListInvitationsRequest$MaxResults": "You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.
", "ListMembersRequest$MaxResults": "You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.
", + "ListPublishingDestinationsRequest$MaxResults": "The maximum number of results to return in the response.
", "ListThreatIntelSetsRequest$MaxResults": "You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.
" } }, @@ -883,7 +964,7 @@ "refs": { "CreateIPSetRequest$Name": "The user friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.
", "CreateThreatIntelSetRequest$Name": "A user-friendly ThreatIntelSet name that is displayed in all finding generated by activity that involves IP addresses included in this ThreatIntelSet.
", - "GetIPSetResponse$Name": "The user friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.
", + "GetIPSetResponse$Name": "The user friendly name for the IPSet.
", "GetThreatIntelSetResponse$Name": "A user-friendly ThreatIntelSet name that is displayed in all finding generated by activity that involves IP addresses included in this ThreatIntelSet.
", "UpdateIPSetRequest$Name": "The unique ID that specifies the IPSet that you want to update.
", "UpdateThreatIntelSetRequest$Name": "The unique ID that specifies the ThreatIntelSet that you want to update.
" @@ -892,17 +973,17 @@ "Neq": { "base": null, "refs": { - "Condition$Neq": "Deprecated. Represents the not equal condition to be applied to a single field when querying for findings.
" + "Condition$Neq": "Represents the not equal condition to be applied to a single field when querying for findings.
" } }, "NetworkConnectionAction": { - "base": "Contains information about the network connection.
", + "base": "Contains information about the NETWORK_CONNECTION action described in the finding.
", "refs": { "Action$NetworkConnectionAction": "Information about the NETWORK_CONNECTION action described in this finding.
" } }, "NetworkInterface": { - "base": "Contains information about the network interface.
", + "base": "Contains information about the network interface of the Ec2 instance.
", "refs": { "NetworkInterfaces$member": null } @@ -926,13 +1007,13 @@ } }, "Organization": { - "base": "Continas information about the organization.
", + "base": "Continas information about the ISP organization of the remote IP address.
", "refs": { "RemoteIpDetails$Organization": "ISP Organization information of the remote IP address.
" } }, "PortProbeAction": { - "base": "Contains information about the port probe.
", + "base": "Contains information about the PORT_PROBE action described in the finding.
", "refs": { "Action$PortProbeAction": "Information about the PORT_PROBE action described in this finding.
" } @@ -950,7 +1031,7 @@ } }, "PrivateIpAddressDetails": { - "base": "Contains information about the private IP address.
", + "base": "Contains other private IP address information of the EC2 instance.
", "refs": { "PrivateIpAddresses$member": null } @@ -962,7 +1043,7 @@ } }, "ProductCode": { - "base": "Contains information about the product code.
", + "base": "Contains information about the product code for the Ec2 instance.
", "refs": { "ProductCodes$member": null } @@ -973,8 +1054,15 @@ "InstanceDetails$ProductCodes": "The product code of the EC2 instance.
" } }, + "PublishingStatus": { + "base": null, + "refs": { + "DescribePublishingDestinationResponse$Status": "The status of the publishing destination.
", + "Destination$Status": "The status of the publishing destination.
" + } + }, "RemoteIpDetails": { - "base": "Continas information about the remote IP address.
", + "base": "Continas information about the remote IP address of the connection.
", "refs": { "AwsApiCallAction$RemoteIpDetails": "Remote IP information of the connection.
", "NetworkConnectionAction$RemoteIpDetails": "Remote IP information of the connection.
", @@ -988,13 +1076,13 @@ } }, "Resource": { - "base": "Contains information about the resource.
", + "base": "Contains information about the AWS resource associated with the activity that prompted GuardDuty to generate a finding.
", "refs": { "Finding$Resource": null } }, "SecurityGroup": { - "base": "Contains information about the security group.
", + "base": "Contains information about the security groups associated with the EC2 instance.
", "refs": { "SecurityGroups$member": null } @@ -1006,13 +1094,13 @@ } }, "Service": { - "base": "Contains information about the service.
", + "base": "Contains additional information about the generated finding.
", "refs": { "Finding$Service": null } }, "SortCriteria": { - "base": "Contains information about the criteria for sorting.
", + "base": "Contains information about the criteria used for sorting findings.
", "refs": { "GetFindingsRequest$SortCriteria": "Represents the criteria used for sorting findings.
", "ListFindingsRequest$SortCriteria": "Represents the criteria used for sorting findings.
" @@ -1058,12 +1146,19 @@ "Country$CountryCode": "Country code of the remote IP address.
", "Country$CountryName": "Country name of the remote IP address.
", "CreateIPSetResponse$IpSetId": "The ID of the IPSet resource.
", + "CreatePublishingDestinationResponse$DestinationId": "The ID of the publishing destination created.
", "CreateThreatIntelSetResponse$ThreatIntelSetId": "The ID of the ThreatIntelSet resource.
", "Criterion$key": null, "DeleteFilterRequest$FilterName": "The name of the filter you want to delete.
", - "DeleteIPSetRequest$IpSetId": "The unique ID of the ipSet you want to delete.
", + "DeleteIPSetRequest$IpSetId": "The unique ID of the IPSet to delete.
", + "DeletePublishingDestinationRequest$DestinationId": "The ID of the publishing destination to delete.
", "DeleteThreatIntelSetRequest$ThreatIntelSetId": "The unique ID of the threatIntelSet you want to delete.
", - "DnsRequestAction$Domain": "Domain information for the DNS request.
", + "DescribePublishingDestinationRequest$DestinationId": "The ID of the publishing destination to retrieve.
", + "DescribePublishingDestinationResponse$DestinationId": "The ID of the publishing destination.
", + "Destination$DestinationId": "The unique ID of the publishing destination.
", + "DestinationProperties$DestinationArn": "The ARN of the resource to publish to.
", + "DestinationProperties$KmsKeyArn": "The ARN of the KMS key to use for encryption.
", + "DnsRequestAction$Domain": "Domain information for the API request.
", "DomainDetails$Domain": "Domain information for the AWS API call.
", "Eq$member": null, "Equals$member": null, @@ -1081,7 +1176,7 @@ "GetDetectorResponse$ServiceRole": "The GuardDuty service role.
", "GetDetectorResponse$UpdatedAt": "Detector last update timestamp.
", "GetFilterRequest$FilterName": "The name of the filter you want to get.
", - "GetIPSetRequest$IpSetId": "The unique ID of the ipSet you want to get.
", + "GetIPSetRequest$IpSetId": "The unique ID of the IPSet to retrieve.
", "GetThreatIntelSetRequest$ThreatIntelSetId": "The unique ID of the threatIntelSet you want to get.
", "IamInstanceProfile$Arn": "AWS EC2 instance profile ARN.
", "IamInstanceProfile$Id": "AWS EC2 instance profile ID.
", @@ -1095,9 +1190,9 @@ "InstanceDetails$Platform": "The platform of the EC2 instance.
", "InternalServerErrorException$Message": "The error message.
", "InternalServerErrorException$Type": "The error type.
", - "Invitation$InvitationId": "This value is used to validate the inviter account to the member account.
", + "Invitation$InvitationId": "The ID of the invitation. This value is used to validate the inviter account to the member account.
", "Invitation$RelationshipStatus": "The status of the relationship between the inviter and invitee accounts.
", - "Invitation$InvitedAt": "Timestamp at which the invitation was sent
", + "Invitation$InvitedAt": "Timestamp at which the invitation was sent.
", "InviteMembersRequest$Message": "The invitation message that you want to send to the accounts that you’re inviting to GuardDuty as members.
", "IpSetIds$member": null, "Ipv6Addresses$member": null, @@ -1114,7 +1209,9 @@ "ListMembersRequest$NextToken": "You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.
", "ListMembersRequest$OnlyAssociated": "Specifies whether to only return associated members or to return all members (including members which haven't been invited yet or have been disassociated).
", "ListMembersResponse$NextToken": "Pagination parameter to be used on the next list operation to retrieve more items.
", - "ListThreatIntelSetsRequest$NextToken": "You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.
", + "ListPublishingDestinationsRequest$NextToken": "A token to use for paginating results returned in the repsonse. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken
value returned from the previous request to continue listing results after the first page.
A token to use for paginating results returned in the repsonse. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken
value returned from the previous request to continue listing results after the first page.
You can use this parameter to paginate results in the response. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.
", "ListThreatIntelSetsResponse$NextToken": "Pagination parameter to be used on the next list operation to retrieve more items.
", "LocalPortDetails$PortName": "Port name of the local connection.
", "Master$InvitationId": "This value is used to validate the master account to the member account.
", @@ -1163,11 +1260,12 @@ "UpdateFilterRequest$FilterName": "The name of the filter.
", "UpdateFindingsFeedbackRequest$Comments": "Additional feedback about the GuardDuty findings.
", "UpdateIPSetRequest$IpSetId": "The unique ID that specifies the IPSet that you want to update.
", + "UpdatePublishingDestinationRequest$DestinationId": "The ID of the detector associated with the publishing destinations to update.
", "UpdateThreatIntelSetRequest$ThreatIntelSetId": "The unique ID that specifies the ThreatIntelSet that you want to update.
" } }, "Tag": { - "base": "Contains information about the tag associated with the resource.
", + "base": "Contains information about a tag associated with the Ec2 instance.
", "refs": { "Tags$member": null } @@ -1182,7 +1280,7 @@ "TagKeyList": { "base": null, "refs": { - "UntagResourceRequest$TagKeys": "The tag keys to remove from a resource.
" + "UntagResourceRequest$TagKeys": "The tag keys to remove from the resource.
" } }, "TagMap": { @@ -1339,6 +1437,16 @@ "refs": { } }, + "UpdatePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "UpdatePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, "UpdateThreatIntelSetRequest": { "base": null, "refs": { diff --git a/models/apis/guardduty/2017-11-28/paginators-1.json b/models/apis/guardduty/2017-11-28/paginators-1.json index 1e70a2ccc09..717e540366d 100644 --- a/models/apis/guardduty/2017-11-28/paginators-1.json +++ b/models/apis/guardduty/2017-11-28/paginators-1.json @@ -36,6 +36,11 @@ "limit_key": "MaxResults", "result_key": "Members" }, + "ListPublishingDestinations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListThreatIntelSets": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/iam/2010-05-08/api-2.json b/models/apis/iam/2010-05-08/api-2.json index a1def8d9103..05eeccc5444 100644 --- a/models/apis/iam/2010-05-08/api-2.json +++ b/models/apis/iam/2010-05-08/api-2.json @@ -4588,7 +4588,8 @@ "Description":{"shape":"roleDescriptionType"}, "MaxSessionDuration":{"shape":"roleMaxSessionDurationType"}, "PermissionsBoundary":{"shape":"AttachedPermissionsBoundary"}, - "Tags":{"shape":"tagListType"} + "Tags":{"shape":"tagListType"}, + "RoleLastUsed":{"shape":"RoleLastUsed"} } }, "RoleDetail":{ @@ -4604,7 +4605,15 @@ "RolePolicyList":{"shape":"policyDetailListType"}, "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"}, "PermissionsBoundary":{"shape":"AttachedPermissionsBoundary"}, - "Tags":{"shape":"tagListType"} + "Tags":{"shape":"tagListType"}, + "RoleLastUsed":{"shape":"RoleLastUsed"} + } + }, + "RoleLastUsed":{ + "type":"structure", + "members":{ + "LastUsedDate":{"shape":"dateType"}, + "Region":{"shape":"stringType"} } }, "RoleUsageListType":{ diff --git a/models/apis/iam/2010-05-08/docs-2.json b/models/apis/iam/2010-05-08/docs-2.json index e6f5ac271bc..6bbc6cad38c 100644 --- 
a/models/apis/iam/2010-05-08/docs-2.json +++ b/models/apis/iam/2010-05-08/docs-2.json @@ -234,7 +234,7 @@ } }, "AttachedPolicy": { - "base": "Contains information about an attached policy.
An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.
For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about an attached policy.
An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.
For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "attachedPoliciesListType$member": null } @@ -1328,7 +1328,7 @@ } }, "ManagedPolicyDetail": { - "base": "Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.
This data type is used as a response element in the GetAccountAuthorizationDetails operation.
For more information about managed policies, see Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.
This data type is used as a response element in the GetAccountAuthorizationDetails operation.
For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "ManagedPolicyDetailListType$member": null } @@ -1387,7 +1387,7 @@ } }, "Policy": { - "base": "Contains information about a managed policy.
This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.
For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about a managed policy.
This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.
For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "CreatePolicyResponse$Policy": "A structure containing details about the new policy.
", "GetPolicyResponse$Policy": "A structure containing details about the policy.
", @@ -1420,7 +1420,7 @@ } }, "PolicyGroup": { - "base": "Contains information about a group that a managed policy is attached to.
This data type is used as a response element in the ListEntitiesForPolicy operation.
For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about a group that a managed policy is attached to.
This data type is used as a response element in the ListEntitiesForPolicy operation.
For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "PolicyGroupListType$member": null } @@ -1443,7 +1443,7 @@ } }, "PolicyRole": { - "base": "Contains information about a role that a managed policy is attached to.
This data type is used as a response element in the ListEntitiesForPolicy operation.
For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about a role that a managed policy is attached to.
This data type is used as a response element in the ListEntitiesForPolicy operation.
For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "PolicyRoleListType$member": null } @@ -1468,7 +1468,7 @@ } }, "PolicyUser": { - "base": "Contains information about a user that a managed policy is attached to.
This data type is used as a response element in the ListEntitiesForPolicy operation.
For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about a user that a managed policy is attached to.
This data type is used as a response element in the ListEntitiesForPolicy operation.
For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "PolicyUserListType$member": null } @@ -1480,7 +1480,7 @@ } }, "PolicyVersion": { - "base": "Contains information about a version of a managed policy.
This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.
For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.
", + "base": "Contains information about a version of a managed policy.
This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.
For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.
", "refs": { "CreatePolicyVersionResponse$PolicyVersion": "A structure containing details about the new policy version.
", "GetPolicyVersionResponse$PolicyVersion": "A structure containing details about the policy version.
", @@ -1644,6 +1644,13 @@ "roleDetailListType$member": null } }, + "RoleLastUsed": { + "base": "Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.
This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations.
", + "refs": { + "Role$RoleLastUsed": "Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.
", + "RoleDetail$RoleLastUsed": "Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.
" + } + }, "RoleUsageListType": { "base": null, "refs": { @@ -2048,9 +2055,9 @@ "GetPolicyRequest$PolicyArn": "The Amazon Resource Name (ARN) of the managed policy that you want information about.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "GetPolicyVersionRequest$PolicyArn": "The Amazon Resource Name (ARN) of the managed policy that you want information about.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "GetSAMLProviderRequest$SAMLProviderArn": "The Amazon Resource Name (ARN) of the SAML provider resource object in IAM to get information about.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", - "Group$Arn": "The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.
", + "Group$Arn": "The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.
", "GroupDetail$Arn": null, - "InstanceProfile$Arn": "The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.
", + "InstanceProfile$Arn": "The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.
", "ListEntitiesForPolicyRequest$PolicyArn": "The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "ListPoliciesGrantingServiceAccessRequest$Arn": "The ARN of the IAM identity (user, group, or role) whose policies you want to list.
", "ListPolicyVersionsRequest$PolicyArn": "The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", @@ -2064,14 +2071,14 @@ "Role$Arn": "The Amazon Resource Name (ARN) specifying the role. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.
", "RoleDetail$Arn": null, "SAMLProviderListEntry$Arn": "The Amazon Resource Name (ARN) of the SAML provider.
", - "ServerCertificateMetadata$Arn": "The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.
", + "ServerCertificateMetadata$Arn": "The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.
", "ServiceLastAccessed$LastAuthenticatedEntity": "The ARN of the authenticated entity (user or role) that last attempted to access the service. AWS does not report unauthenticated requests.
This field is null if no IAM entities attempted to access the service within the reporting period.
", "SetDefaultPolicyVersionRequest$PolicyArn": "The Amazon Resource Name (ARN) of the IAM policy whose default version you want to set.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "SimulatePrincipalPolicyRequest$PolicySourceArn": "The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "UpdateOpenIDConnectProviderThumbprintRequest$OpenIDConnectProviderArn": "The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "UpdateSAMLProviderRequest$SAMLProviderArn": "The Amazon Resource Name (ARN) of the SAML provider to update.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
", "UpdateSAMLProviderResponse$SAMLProviderArn": "The Amazon Resource Name (ARN) of the SAML provider that was updated.
", - "User$Arn": "The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the Using IAM guide.
", + "User$Arn": "The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.
", "UserDetail$Arn": null } }, @@ -2272,6 +2279,7 @@ "PolicyVersion$CreateDate": "The date and time, in ISO 8601 date-time format, when the policy version was created.
", "Role$CreateDate": "The date and time, in ISO 8601 date-time format, when the role was created.
", "RoleDetail$CreateDate": "The date and time, in ISO 8601 date-time format, when the role was created.
", + "RoleLastUsed$LastUsedDate": "The date and time, in ISO 8601 date-time format, when the role was last used.
This field is null if the role has not been used within the IAM tracking period. For more information about the tracking period, see Regions Where Data Is Tracked in the IAM User Guide.
", "SAMLProviderListEntry$ValidUntil": "The expiration date and time for the SAML provider.
", "SAMLProviderListEntry$CreateDate": "The date and time when the SAML provider was created.
", "SSHPublicKey$UploadDate": "The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.
", @@ -2283,7 +2291,7 @@ "ServiceSpecificCredentialMetadata$CreateDate": "The date and time, in ISO 8601 date-time format, when the service-specific credential were created.
", "SigningCertificate$UploadDate": "The date when the signing certificate was uploaded.
", "User$CreateDate": "The date and time, in ISO 8601 date-time format, when the user was created.
", - "User$PasswordLastUsed": "The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:
The user never had a password.
A password exists but has not been used since IAM started tracking this information on October 20, 2014.
A null value does not mean that the user never had a password. Also, if the user does not currently have a password, but had one in the past, then this field contains the date and time the most recent password was used.
This value is returned only in the GetUser and ListUsers operations.
", + "User$PasswordLastUsed": "The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the IAM User Guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:
The user never had a password.
A password exists but has not been used since IAM started tracking this information on October 20, 2014.
A null value does not mean that the user never had a password. Also, if the user does not currently have a password but had one in the past, then this field contains the date and time the most recent password was used.
This value is returned only in the GetUser and ListUsers operations.
", "UserDetail$CreateDate": "The date and time, in ISO 8601 date-time format, when the user was created.
", "VirtualMFADevice$EnableDate": "The date and time on which the virtual MFA device was enabled.
" } @@ -2428,19 +2436,19 @@ "base": null, "refs": { "EntityInfo$Id": "The identifier of the entity (user or role).
", - "Group$GroupId": "The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "GroupDetail$GroupId": "The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "InstanceProfile$InstanceProfileId": "The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "ManagedPolicyDetail$PolicyId": "The stable and unique string identifying the policy.
For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "Policy$PolicyId": "The stable and unique string identifying the policy.
For more information about IDs, see IAM Identifiers in the Using IAM guide.
", + "Group$GroupId": "The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "GroupDetail$GroupId": "The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "InstanceProfile$InstanceProfileId": "The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "ManagedPolicyDetail$PolicyId": "The stable and unique string identifying the policy.
For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "Policy$PolicyId": "The stable and unique string identifying the policy.
For more information about IDs, see IAM Identifiers in the IAM User Guide.
", "PolicyGroup$GroupId": "The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", "PolicyRole$RoleId": "The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", "PolicyUser$UserId": "The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", - "Role$RoleId": "The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "RoleDetail$RoleId": "The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "ServerCertificateMetadata$ServerCertificateId": "The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "User$UserId": "The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.
", - "UserDetail$UserId": "The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.
" + "Role$RoleId": "The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "RoleDetail$RoleId": "The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "ServerCertificateMetadata$ServerCertificateId": "The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "User$UserId": "The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.
", + "UserDetail$UserId": "The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.
" } }, "instanceProfileListType": { @@ -2702,20 +2710,20 @@ "CreateRoleRequest$Path": "The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.
This parameter is optional. If it is not included, it defaults to a slash (/).
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "CreateUserRequest$Path": "The path for the user name. For more information about paths, see IAM Identifiers in the IAM User Guide.
This parameter is optional. If it is not included, it defaults to a slash (/).
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "CreateVirtualMFADeviceRequest$Path": "The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the IAM User Guide.
This parameter is optional. If it is not included, it defaults to a slash (/).
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", - "EntityInfo$Path": "The path to the entity (user or role). For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "Group$Path": "The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "GroupDetail$Path": "The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "InstanceProfile$Path": "The path to the instance profile. For more information about paths, see IAM Identifiers in the Using IAM guide.
", + "EntityInfo$Path": "The path to the entity (user or role). For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "Group$Path": "The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "GroupDetail$Path": "The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "InstanceProfile$Path": "The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.
", "ListEntitiesForPolicyRequest$PathPrefix": "The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", - "Role$Path": "The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "RoleDetail$Path": "The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "ServerCertificateMetadata$Path": "The path to the server certificate. For more information about paths, see IAM Identifiers in the Using IAM guide.
", + "Role$Path": "The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "RoleDetail$Path": "The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "ServerCertificateMetadata$Path": "The path to the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.
", "UpdateGroupRequest$NewPath": "New path for the IAM group. Only include this if changing the group's path.
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "UpdateServerCertificateRequest$NewPath": "The new path for the server certificate. Include this only if you are updating the server certificate's path.
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "UpdateUserRequest$NewPath": "New path for the IAM user. Include this parameter only if you're changing the user's path.
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "UploadServerCertificateRequest$Path": "The path for the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.
This parameter is optional. If it is not included, it defaults to a slash (/). This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the path
parameter. The path must begin with /cloudfront
and must include a trailing slash (for example, /cloudfront/test/
).
The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "UserDetail$Path": "The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.
" + "User$Path": "The path to the user. For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "UserDetail$Path": "The path to the user. For more information about paths, see IAM Identifiers in the IAM User Guide.
" } }, "policyDescriptionType": { @@ -2834,8 +2842,8 @@ "ListAttachedRolePoliciesRequest$PathPrefix": "The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "ListAttachedUserPoliciesRequest$PathPrefix": "The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.
This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", "ListPoliciesRequest$PathPrefix": "The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
", - "ManagedPolicyDetail$Path": "The path to the policy.
For more information about paths, see IAM Identifiers in the Using IAM guide.
", - "Policy$Path": "The path to the policy.
For more information about paths, see IAM Identifiers in the Using IAM guide.
" + "ManagedPolicyDetail$Path": "The path to the policy.
For more information about paths, see IAM Identifiers in the IAM User Guide.
", + "Policy$Path": "The path to the policy.
For more information about paths, see IAM Identifiers in the IAM User Guide.
" } }, "policyScopeType": { @@ -2855,7 +2863,7 @@ "refs": { "DeletePolicyVersionRequest$VersionId": "The policy version to delete.
This parameter allows (through its regex pattern) a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.
For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.
", "GetPolicyVersionRequest$VersionId": "Identifies the policy version to retrieve.
This parameter allows (through its regex pattern) a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.
", - "ManagedPolicyDetail$DefaultVersionId": "The identifier for the version of the policy that is set as the default (operative) version.
For more information about policy versions, see Versioning for Managed Policies in the Using IAM guide.
", + "ManagedPolicyDetail$DefaultVersionId": "The identifier for the version of the policy that is set as the default (operative) version.
For more information about policy versions, see Versioning for Managed Policies in the IAM User Guide.
", "Policy$DefaultVersionId": "The identifier for the version of the policy that is set as the default version.
", "PolicyVersion$VersionId": "The identifier for the policy version.
Policy version identifiers always begin with v
(always lowercase). When a policy is created, the first policy version is v1
.
The version of the policy to set as the default (operative) version.
For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.
" @@ -3117,7 +3125,8 @@ "AccessKeyLastUsed$ServiceName": "The name of the AWS service with which this access key was most recently used. The value of this field is \"N/A\" in the following situations:
The user does not have an access key.
An access key exists but has not been used since IAM started tracking this information.
There is no sign-in data associated with the user.
The AWS Region where this access key was most recently used. The value for this field is \"N/A\" in the following situations:
The user does not have an access key.
An access key exists but has not been used since IAM began tracking this information.
There is no sign-in data associated with the user.
For more information about AWS Regions, see Regions and Endpoints in the Amazon Web Services General Reference.
", "ErrorDetails$Message": "Detailed information about the reason that the operation failed.
", - "ErrorDetails$Code": "The error code associated with the operation failure.
" + "ErrorDetails$Code": "The error code associated with the operation failure.
", + "RoleLastUsed$Region": "The name of the AWS Region in which the role was last used.
" } }, "summaryKeyType": { @@ -3182,7 +3191,7 @@ } }, "thumbprintType": { - "base": "Contains a thumbprint for an identity provider's server certificate.
The identity provider's server certificate thumbprint is the hex-encoded SHA-1 hash value of the self-signed X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.
", + "base": "Contains a thumbprint for an identity provider's server certificate.
The identity provider's server certificate thumbprint is the hex-encoded SHA-1 hash value of the self-signed X.509 certificate. This thumbprint is used by the domain where the OpenID Connect provider makes its keys available. The thumbprint is always a 40-character string.
", "refs": { "thumbprintListType$member": null } diff --git a/models/apis/iam/2010-05-08/examples-1.json b/models/apis/iam/2010-05-08/examples-1.json index 928dc2132a0..cd3a94aa608 100644 --- a/models/apis/iam/2010-05-08/examples-1.json +++ b/models/apis/iam/2010-05-08/examples-1.json @@ -733,8 +733,13 @@ "Arn": "arn:aws:iam::123456789012:role/Test-Role", "AssumeRolePolicyDocument": "Cancels a job.
", "CancelJobExecution": "Cancels the execution of a job for a given thing.
", "ClearDefaultAuthorizer": "Clears the default authorizer.
", + "ConfirmTopicRuleDestination": "Confirms a topic rule destination. When you create a rule requiring a destination, AWS IoT sends a confirmation message to the endpoint or base address you specify. The message includes a token which you pass back when calling ConfirmTopicRuleDestination
to confirm that you own or have access to the endpoint.
Creates an authorizer.
", "CreateBillingGroup": "Creates a billing group.
", "CreateCertificateFromCsr": "Creates an X.509 certificate using the specified certificate signing request.
Note: The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-384 curves.
Note: Reusing the same certificate signing request (CSR) results in a distinct certificate.
You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs.
Assuming a set of CSRs are located inside of the directory my-csr-directory:
On Linux and OS X, the command is:
$ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}
This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr AWS CLI command to create a certificate for the corresponding CSR.
The aws iot create-certificate-from-csr part of the command can also be run in parallel to speed up the certificate creation process:
$ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}
On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:
> ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}
On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:
> forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"
", @@ -29,11 +30,12 @@ "CreateRoleAlias": "Creates a role alias.
", "CreateScheduledAudit": "Creates a scheduled audit that is run at a specified time interval.
", "CreateSecurityProfile": "Creates a Device Defender security profile.
", - "CreateStream": "Creates a stream for delivering one or more large files in chunks over MQTT. A stream transports data bytes in chunks or blocks packaged as MQTT messages from a source like S3. You can have one or more files associated with a stream. The total size of a file associated with the stream cannot exceed more than 2 MB. The stream will be created with version 0. If a stream is created with the same streamID as a stream that existed and was deleted within last 90 days, we will resurrect that old stream by incrementing the version by 1.
", + "CreateStream": "Creates a stream for delivering one or more large files in chunks over MQTT. A stream transports data bytes in chunks or blocks packaged as MQTT messages from a source like S3. You can have one or more files associated with a stream.
", "CreateThing": "Creates a thing record in the registry. If this call is made multiple times using the same thing name and configuration, the call will succeed. If this call is made with the same thing name but different configuration a ResourceAlreadyExistsException
is thrown.
This is a control plane operation. See Authorization for information about authorizing control plane actions.
Create a thing group.
This is a control plane operation. See Authorization for information about authorizing control plane actions.
Creates a new thing type.
", "CreateTopicRule": "Creates a rule. Creating rules is an administrator-level action. Any user who has permission to create rules will be able to access data processed by the rule.
", + "CreateTopicRuleDestination": "Creates a topic rule destination. The destination must be confirmed prior to use.
", "DeleteAccountAuditConfiguration": "Restores the default settings for Device Defender audits for this account. Any configuration data you entered is deleted and all audit checks are reset to disabled.
", "DeleteAuthorizer": "Deletes an authorizer.
", "DeleteBillingGroup": "Deletes the billing group.
", @@ -55,6 +57,7 @@ "DeleteThingGroup": "Deletes a thing group.
", "DeleteThingType": "Deletes the specified thing type. You cannot delete a thing type if it has things associated with it. To delete a thing type, first mark it as deprecated by calling DeprecateThingType, then remove any associated things by calling UpdateThing to change the thing type on any associated thing, and finally use DeleteThingType to delete the thing type.
", "DeleteTopicRule": "Deletes the rule.
", + "DeleteTopicRuleDestination": "Deletes a topic rule destination.
", "DeleteV2LoggingLevel": "Deletes a logging level.
", "DeprecateThingType": "Deprecates a thing type. You can not associate new things with deprecated thing type.
", "DescribeAccountAuditConfiguration": "Gets information about the Device Defender audit settings for this account. Settings include how audit notifications are sent and which audit checks are enabled or disabled.
", @@ -86,16 +89,19 @@ "DetachThingPrincipal": "Detaches the specified principal from the specified thing. A principal can be X.509 certificates, IAM users, groups, and roles, Amazon Cognito identities or federated identities.
This call is asynchronous. It might take several seconds for the detachment to propagate.
Disables the rule.
", "EnableTopicRule": "Enables the rule.
", + "GetCardinality": "Returns the approximate count of unique values that match the query.
", "GetEffectivePolicies": "Gets a list of the policies that have an effect on the authorization behavior of the specified device when it connects to the AWS IoT device gateway.
", "GetIndexingConfiguration": "Gets the search configuration.
", "GetJobDocument": "Gets a job document.
", "GetLoggingOptions": "Gets the logging options.
NOTE: use of this command is not recommended. Use GetV2LoggingOptions
instead.
Gets an OTA update.
", + "GetPercentiles": "Groups the aggregated values that match the query into percentile groupings. The default percentile groupings are: 1,5,25,50,75,95,99, although you can specify your own when you call GetPercentiles
. This function returns a value for each percentile group specified (or the default percentile groupings). The percentile group \"1\" contains the aggregated field value that occurs in approximately one percent of the values that match the query. The percentile group \"5\" contains the aggregated field value that occurs in approximately five percent of the values that match the query, and so on. The result is an approximation, the more values that match the query, the more accurate the percentile values.
Gets information about the specified policy with the policy document of the default version.
", "GetPolicyVersion": "Gets information about the specified policy version.
", "GetRegistrationCode": "Gets a registration code used to register a CA certificate with AWS IoT.
", "GetStatistics": "Gets statistics about things that match the specified query.
", "GetTopicRule": "Gets information about the rule.
", + "GetTopicRuleDestination": "Gets information about a topic rule destination.
", "GetV2LoggingOptions": "Gets the fine grained logging options.
", "ListActiveViolations": "Lists the active violations for a given Device Defender security profile.
", "ListAttachedPolicies": "Lists the policies attached to the specified thing group.
", @@ -137,6 +143,7 @@ "ListThings": "Lists your things. Use the attributeName and attributeValue parameters to filter your things. For example, calling ListThings
with attributeName=Color and attributeValue=Red retrieves all things in the registry that contain an attribute Color with the value Red.
Lists the things you have added to the given billing group.
", "ListThingsInThingGroup": "Lists the things in the specified group.
", + "ListTopicRuleDestinations": "Lists all the topic rule destinations in your AWS account.
", "ListTopicRules": "Lists the rules for the specific topic.
", "ListV2LoggingLevels": "Lists logging levels.
", "ListViolationEvents": "Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior, or thing (device).
", @@ -179,6 +186,7 @@ "UpdateThing": "Updates the data for a thing.
", "UpdateThingGroup": "Update a thing group.
", "UpdateThingGroupsForThing": "Updates the groups to which the thing belongs.
", + "UpdateTopicRuleDestination": "Updates a topic rule destination. You use this to change the status, endpoint URL, or confirmation URL of the destination.
", "ValidateSecurityProfileBehaviors": "Validates a Device Defender security profile behaviors specification.
" }, "shapes": { @@ -297,6 +305,8 @@ "AggregationField": { "base": null, "refs": { + "GetCardinalityRequest$aggregationField": "The field to aggregate.
", + "GetPercentilesRequest$aggregationField": "The field to aggregate.
", "GetStatisticsRequest$aggregationField": "The aggregation field name. Currently not supported.
" } }, @@ -761,6 +771,12 @@ "UpdateCACertificateRequest$newAutoRegistrationStatus": "The new value for the auto registration status. Valid values are: \"ENABLE\" or \"DISABLE\".
" } }, + "Average": { + "base": null, + "refs": { + "Statistics$average": "The average of the aggregated fields. If the field data type is String this value is indeterminate.
" + } + }, "AwsAccountId": { "base": null, "refs": { @@ -778,11 +794,13 @@ "refs": { "CloudwatchAlarmAction$roleArn": "The IAM role that allows access to the CloudWatch alarm.
", "CloudwatchMetricAction$roleArn": "The IAM role that allows access to the CloudWatch metric.
", + "DeleteTopicRuleDestinationRequest$arn": "The ARN of the topic rule destination to delete.
", "DynamoDBAction$roleArn": "The ARN of the IAM role that grants access to the DynamoDB table.
", "DynamoDBv2Action$roleArn": "The ARN of the IAM role that grants access to the DynamoDB table.
", "ElasticsearchAction$roleArn": "The IAM role ARN that has access to Elasticsearch.
", "FirehoseAction$roleArn": "The IAM role that grants access to the Amazon Kinesis Firehose stream.
", "GetLoggingOptionsResponse$roleArn": "The ARN of the IAM role that grants access.
", + "GetTopicRuleDestinationRequest$arn": "The ARN of the topic rule destination.
", "GetV2LoggingOptionsResponse$roleArn": "The IAM role ARN AWS IoT uses to write to your CloudWatch logs.
", "IotAnalyticsAction$channelArn": "(deprecated) The ARN of the IoT Analytics channel to which message data will be sent.
", "IotAnalyticsAction$roleArn": "The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).
", @@ -792,10 +810,14 @@ "RepublishAction$roleArn": "The ARN of the IAM role that grants access.
", "S3Action$roleArn": "The ARN of the IAM role that grants access.
", "SetV2LoggingOptionsRequest$roleArn": "The ARN of the role that allows IoT to write to Cloudwatch logs.
", + "SigV4Authorization$roleArn": "The ARN of the signing role.
", "SnsAction$targetArn": "The ARN of the SNS topic.
", "SnsAction$roleArn": "The ARN of the IAM role that grants access.
", "SqsAction$roleArn": "The ARN of the IAM role that grants access.
", - "StepFunctionsAction$roleArn": "The ARN of the role that grants IoT permission to start execution of a state machine (\"Action\":\"states:StartExecution\").
" + "StepFunctionsAction$roleArn": "The ARN of the role that grants IoT permission to start execution of a state machine (\"Action\":\"states:StartExecution\").
", + "TopicRuleDestination$arn": "The topic rule destination URL.
", + "TopicRuleDestinationSummary$arn": "The topic rule destination ARN.
", + "UpdateTopicRuleDestinationRequest$arn": "The ARN of the topic rule destination.
" } }, "AwsIotJobArn": { @@ -1275,6 +1297,22 @@ "EventConfigurations$value": null } }, + "ConfirmTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "ConfirmTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, + "ConfirmationToken": { + "base": null, + "refs": { + "ConfirmTopicRuleDestinationRequest$confirmationToken": "The token used to confirm ownership or access to the topic rule confirmation URL.
" + } + }, "ConflictingResourceUpdateException": { "base": "A conflicting resource update exception. This exception is thrown when two pending updates cause a conflict.
", "refs": { @@ -1303,6 +1341,7 @@ "refs": { "DescribeThingRegistrationTaskResponse$successCount": "The number of things successfully provisioned.
", "DescribeThingRegistrationTaskResponse$failureCount": "The number of things that failed to be provisioned.
", + "GetCardinalityResponse$cardinality": "The approximate count of unique values that match the query.
", "Statistics$count": "The count of things that match the query.
" } }, @@ -1476,6 +1515,16 @@ "refs": { } }, + "CreateTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "CreateTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, "CreateTopicRuleRequest": { "base": "The input for the CreateTopicRule operation.
", "refs": { @@ -1791,6 +1840,16 @@ "refs": { } }, + "DeleteTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "DeleteTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, "DeleteTopicRuleRequest": { "base": "The input for the DeleteTopicRule operation.
", "refs": { @@ -2356,6 +2415,33 @@ "JobProcessDetails$numberOfFailedThings": "The number of things that failed executing the job.
" } }, + "Field": { + "base": "The field to aggregate.
", + "refs": { + "Fields$member": null + } + }, + "FieldName": { + "base": null, + "refs": { + "Field$name": "The name of the field.
" + } + }, + "FieldType": { + "base": null, + "refs": { + "Field$type": "The data type of the field.
" + } + }, + "Fields": { + "base": null, + "refs": { + "ThingGroupIndexingConfiguration$managedFields": "A list of automatically indexed thing group fields.
", + "ThingGroupIndexingConfiguration$customFields": "A list of thing group fields to index. This list cannot contain any managed fields. Use the GetIndexingConfiguration API to get a list of managed fields.
", + "ThingIndexingConfiguration$managedFields": "A list of automatically indexed thing fields.
", + "ThingIndexingConfiguration$customFields": "A list of thing fields to index. This list cannot contain any managed fields. Use the GetIndexingConfiguration API to get a list of managed fields.
" + } + }, "FileId": { "base": null, "refs": { @@ -2444,12 +2530,6 @@ "LambdaAction$functionArn": "The ARN of the Lambda function.
" } }, - "GEMaxResults": { - "base": null, - "refs": { - "ListTopicRulesRequest$maxResults": "The maximum number of results to return.
" - } - }, "GenerationId": { "base": null, "refs": { @@ -2459,6 +2539,16 @@ "GetPolicyVersionResponse$generationId": "The generation ID of the policy version.
" } }, + "GetCardinalityRequest": { + "base": null, + "refs": { + } + }, + "GetCardinalityResponse": { + "base": null, + "refs": { + } + }, "GetEffectivePoliciesRequest": { "base": null, "refs": { @@ -2509,6 +2599,16 @@ "refs": { } }, + "GetPercentilesRequest": { + "base": null, + "refs": { + } + }, + "GetPercentilesResponse": { + "base": null, + "refs": { + } + }, "GetPolicyRequest": { "base": "The input for the GetPolicy operation.
", "refs": { @@ -2549,6 +2649,16 @@ "refs": { } }, + "GetTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "GetTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, "GetTopicRuleRequest": { "base": "The input for the GetTopicRule operation.
", "refs": { @@ -2594,6 +2704,60 @@ "DynamoDBAction$hashKeyValue": "The hash key value.
" } }, + "HeaderKey": { + "base": null, + "refs": { + "HttpActionHeader$key": "The HTTP header key.
" + } + }, + "HeaderList": { + "base": null, + "refs": { + "HttpAction$headers": "The HTTP headers to send with the message data.
" + } + }, + "HeaderValue": { + "base": null, + "refs": { + "HttpActionHeader$value": "The HTTP header value. Substitution templates are supported.
" + } + }, + "HttpAction": { + "base": "Send data to an HTTPS endpoint.
", + "refs": { + "Action$http": "Send data to an HTTPS endpoint.
" + } + }, + "HttpActionHeader": { + "base": "The HTTP action header.
", + "refs": { + "HeaderList$member": null + } + }, + "HttpAuthorization": { + "base": "The authorization method used to send messages.
", + "refs": { + "HttpAction$auth": "The authentication method to use when sending data to an HTTPS endpoint.
" + } + }, + "HttpUrlDestinationConfiguration": { + "base": "HTTP URL destination configuration used by the topic rule's HTTP action.
", + "refs": { + "TopicRuleDestinationConfiguration$httpUrlConfiguration": "Configuration of the HTTP URL.
" + } + }, + "HttpUrlDestinationProperties": { + "base": "HTTP URL destination properties.
", + "refs": { + "TopicRuleDestination$httpUrlProperties": "Properties of the HTTP URL.
" + } + }, + "HttpUrlDestinationSummary": { + "base": "Information about an HTTP URL destination.
", + "refs": { + "TopicRuleDestinationSummary$httpUrlSummary": "Information about the HTTP URL.
" + } + }, "ImplicitDeny": { "base": "Information that implicitly denies authorization. When policy doesn't explicitly deny or allow an action on a resource it is considered an implicit deny.
", "refs": { @@ -2632,6 +2796,8 @@ "DescribeIndexRequest$indexName": "The index name.
", "DescribeIndexResponse$indexName": "The index name.
", "DescribeThingGroupResponse$indexName": "The dynamic thing group index name.
", + "GetCardinalityRequest$indexName": "The name of the index to search.
", + "GetPercentilesRequest$indexName": "The name of the index to search.
", "GetStatisticsRequest$indexName": "The name of the index to search. The default value is AWS_Things
.
The search index name.
", @@ -3369,6 +3535,16 @@ "refs": { } }, + "ListTopicRuleDestinationsRequest": { + "base": null, + "refs": { + } + }, + "ListTopicRuleDestinationsResponse": { + "base": null, + "refs": { + } + }, "ListTopicRulesRequest": { "base": "The input for the ListTopicRules operation.
", "refs": { @@ -3507,6 +3683,12 @@ "ListViolationEventsRequest$maxResults": "The maximum number of results to return at one time.
" } }, + "Maximum": { + "base": null, + "refs": { + "Statistics$maximum": "The maximum value of the aggregated fields. If the field data type is String this value is indeterminate.
" + } + }, "MaximumPerMinute": { "base": null, "refs": { @@ -3543,6 +3725,12 @@ "ViolationEvent$metricValue": "The value of the metric (the measurement).
" } }, + "Minimum": { + "base": null, + "refs": { + "Statistics$minimum": "The minimum value of the aggregated fields. If the field data type is String this value is indeterminate.
" + } + }, "MinimumNumberOfExecutedThings": { "base": null, "refs": { @@ -3697,6 +3885,8 @@ "ListThingsInThingGroupResponse$nextToken": "The token used to get the next set of results, or null if there are no additional results.
", "ListThingsRequest$nextToken": "The token to retrieve the next set of results.
", "ListThingsResponse$nextToken": "The token used to get the next set of results, or null if there are no additional results.
", + "ListTopicRuleDestinationsRequest$nextToken": "The token to retrieve the next set of results.
", + "ListTopicRuleDestinationsResponse$nextToken": "The token to retrieve the next set of results.
", "ListTopicRulesRequest$nextToken": "A token used to retrieve the next value.
", "ListTopicRulesResponse$nextToken": "A token used to retrieve the next value.
", "ListV2LoggingLevelsRequest$nextToken": "The token used to get the next set of results, or null if there are no additional results.
", @@ -3889,12 +4079,43 @@ "DynamoDBAction$payloadField": "The action payload. This name can be customized.
" } }, + "Percent": { + "base": null, + "refs": { + "PercentList$member": null, + "PercentPair$percent": "The percentile.
" + } + }, + "PercentList": { + "base": null, + "refs": { + "GetPercentilesRequest$percents": "The percentile groups returned.
" + } + }, + "PercentPair": { + "base": "Describes the percentile and percentile value.
", + "refs": { + "Percentiles$member": null + } + }, + "PercentValue": { + "base": null, + "refs": { + "PercentPair$value": "The value of the percentile.
" + } + }, "Percentage": { "base": null, "refs": { "DescribeThingRegistrationTaskResponse$percentageProgress": "The progress of the bulk provisioning task expressed as a percentage.
" } }, + "Percentiles": { + "base": null, + "refs": { + "GetPercentilesResponse$percentiles": "The percentile values of the aggregated fields.
" + } + }, "Platform": { "base": null, "refs": { @@ -4064,7 +4285,7 @@ "base": null, "refs": { "AttachPrincipalPolicyRequest$principal": "The principal, which can be a certificate ARN (as returned from the CreateCertificate operation) or an Amazon Cognito ID.
", - "AttachThingPrincipalRequest$principal": "The principal, such as a certificate or other credential.
", + "AttachThingPrincipalRequest$principal": "The principal, which can be a certificate ARN (as returned from the CreateCertificate operation) or an Amazon Cognito ID.
", "DetachPrincipalPolicyRequest$principal": "The principal.
If the principal is a certificate, specify the certificate ARN. If the principal is an Amazon Cognito identity, specify the identity ID.
", "DetachThingPrincipalRequest$principal": "If the principal is a certificate, this value must be ARN of the certificate. If the principal is an Amazon Cognito identity, this value must be the ID of the Amazon Cognito identity.
", "GetEffectivePoliciesRequest$principal": "The principal.
", @@ -4139,7 +4360,7 @@ "Qos": { "base": null, "refs": { - "RepublishAction$qos": "The Quality of Service (QoS) level to use when republishing messages.
" + "RepublishAction$qos": "The Quality of Service (QoS) level to use when republishing messages. The default value is 0.
" } }, "QueryMaxResults": { @@ -4155,6 +4376,8 @@ "CreateDynamicThingGroupRequest$queryString": "The dynamic thing group search query string.
See Query Syntax for information about query string syntax.
", "CreateDynamicThingGroupResponse$queryString": "The dynamic thing group search query string.
", "DescribeThingGroupResponse$queryString": "The dynamic thing group search query string.
", + "GetCardinalityRequest$queryString": "The search query.
", + "GetPercentilesRequest$queryString": "The query string.
", "GetStatisticsRequest$queryString": "The query used to search. You can specify \"*\" for the query string to get the count of all indexed things in your AWS account.
", "SearchIndexRequest$queryString": "The search query string.
", "UpdateDynamicThingGroupRequest$queryString": "The dynamic thing group search query string to update.
" @@ -4166,6 +4389,8 @@ "CreateDynamicThingGroupRequest$queryVersion": "The dynamic thing group query version.
Currently one query version is supported: \"2017-09-30\". If not specified, the query version defaults to this value.
The dynamic thing group query version.
", "DescribeThingGroupResponse$queryVersion": "The dynamic thing group query version.
", + "GetCardinalityRequest$queryVersion": "The query version.
", + "GetPercentilesRequest$queryVersion": "The query version.
", "GetStatisticsRequest$queryVersion": "The version of the query used to search.
", "SearchIndexRequest$queryVersion": "The query version.
", "UpdateDynamicThingGroupRequest$queryVersion": "The dynamic thing group query version to update.
Currently one query version is supported: \"2017-09-30\". If not specified, the query version defaults to this value.
The thing groups to which the security profile is attached.
" } }, + "ServiceName": { + "base": null, + "refs": { + "SigV4Authorization$serviceName": "The service name to use while signing with Sig V4.
" + } + }, "ServiceUnavailableException": { "base": "The service is temporarily unavailable.
", "refs": { @@ -4823,6 +5054,12 @@ "refs": { } }, + "SigV4Authorization": { + "base": "Use Sig V4 authorization.
", + "refs": { + "HttpAuthorization$sigv4": "Use Sig V4 authorization. For more information, see Signature Version 4 Signing Process.
" + } + }, "Signature": { "base": null, "refs": { @@ -4853,6 +5090,12 @@ "StartSigningJobParameter$signingProfileParameter": "Describes the code-signing profile.
" } }, + "SigningRegion": { + "base": null, + "refs": { + "SigV4Authorization$signingRegion": "The signing region.
" + } + }, "SkippedFindingsCount": { "base": null, "refs": { @@ -4961,6 +5204,12 @@ "ListThingRegistrationTasksRequest$status": "The status of the bulk thing provisioning task.
" } }, + "StdDeviation": { + "base": null, + "refs": { + "Statistics$stdDeviation": "The standard deviation of the aggregated field values.
" + } + }, "StepFunctionsAction": { "base": "Starts execution of a Step Functions state machine.
", "refs": { @@ -5074,7 +5323,9 @@ "CloudwatchMetricAction$metricTimestamp": "An optional Unix timestamp.
", "CreateTopicRuleRequest$tags": "Metadata which can be used to manage the topic rule.
For URI Request parameters use format: ...key1=value1&key2=value2...
For the CLI command-line parameter use format: --tags \"key1=value1&key2=value2...\"
For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"
Additional details or reason why the topic rule destination is in the current status.
", + "TopicRuleDestinationSummary$statusReason": "The reason the topic rule destination is in the current status.
" } }, "StringMap": { @@ -5096,6 +5347,18 @@ "JobProcessDetails$numberOfSucceededThings": "The number of things which successfully completed the job.
" } }, + "Sum": { + "base": null, + "refs": { + "Statistics$sum": "The sum of the aggregated fields. If the field data type is String this value is indeterminate.
" + } + }, + "SumOfSquares": { + "base": null, + "refs": { + "Statistics$sumOfSquares": "The sum of the squares of the aggregated field values.
" + } + }, "TableName": { "base": null, "refs": { @@ -5647,6 +5910,45 @@ "GetTopicRuleResponse$rule": "The rule.
" } }, + "TopicRuleDestination": { + "base": "A topic rule destination.
", + "refs": { + "CreateTopicRuleDestinationResponse$topicRuleDestination": "The topic rule destination.
", + "GetTopicRuleDestinationResponse$topicRuleDestination": "The topic rule destination.
" + } + }, + "TopicRuleDestinationConfiguration": { + "base": "Configuration of the topic rule destination.
", + "refs": { + "CreateTopicRuleDestinationRequest$destinationConfiguration": "The topic rule destination configuration.
" + } + }, + "TopicRuleDestinationMaxResults": { + "base": null, + "refs": { + "ListTopicRuleDestinationsRequest$maxResults": "The maximum number of results to return at one time.
" + } + }, + "TopicRuleDestinationStatus": { + "base": null, + "refs": { + "TopicRuleDestination$status": "The status of the topic rule destination. Valid values are:
A topic rule destination was created but has not been confirmed. You can set status
to IN_PROGRESS
by calling UpdateTopicRuleDestination
. Calling UpdateTopicRuleDestination
causes a new confirmation challenge to be sent to your confirmation endpoint.
Confirmation was completed, and traffic to this destination is allowed. You can set status
to DISABLED
by calling UpdateTopicRuleDestination
.
Confirmation was completed, and traffic to this destination is not allowed. You can set status
to ENABLED
by calling UpdateTopicRuleDestination
.
Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination
for details about the error. You can set status
to IN_PROGRESS
by calling UpdateTopicRuleDestination
. Calling UpdateTopicRuleDestination
causes a new confirmation challenge to be sent to your confirmation endpoint.
The status of the topic rule destination. Valid values are:
A topic rule destination was created but has not been confirmed. You can set status
to IN_PROGRESS
by calling UpdateTopicRuleDestination
. Calling UpdateTopicRuleDestination
causes a new confirmation challenge to be sent to your confirmation endpoint.
Confirmation was completed, and traffic to this destination is allowed. You can set status
to DISABLED
by calling UpdateTopicRuleDestination
.
Confirmation was completed, and traffic to this destination is not allowed. You can set status
to ENABLED
by calling UpdateTopicRuleDestination
.
Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination
for details about the error. You can set status
to IN_PROGRESS
by calling UpdateTopicRuleDestination
. Calling UpdateTopicRuleDestination
causes a new confirmation challenge to be sent to your confirmation endpoint.
The status of the topic rule destination. Valid values are:
A topic rule destination was created but has not been confirmed. You can set status
to IN_PROGRESS
by calling UpdateTopicRuleDestination
. Calling UpdateTopicRuleDestination
causes a new confirmation challenge to be sent to your confirmation endpoint.
Confirmation was completed, and traffic to this destination is allowed. You can set status
to DISABLED
by calling UpdateTopicRuleDestination
.
Confirmation was completed, and traffic to this destination is not allowed. You can set status
to ENABLED
by calling UpdateTopicRuleDestination
.
Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination
for details about the error. You can set status
to IN_PROGRESS
by calling UpdateTopicRuleDestination
. Calling UpdateTopicRuleDestination
causes a new confirmation challenge to be sent to your confirmation endpoint.
Information about a topic rule destination.
" + } + }, + "TopicRuleDestinationSummary": { + "base": "Information about the topic rule destination.
", + "refs": { + "TopicRuleDestinationSummaries$member": null + } + }, "TopicRuleList": { "base": null, "refs": { @@ -5659,6 +5961,12 @@ "TopicRuleList$member": null } }, + "TopicRuleMaxResults": { + "base": null, + "refs": { + "ListTopicRulesRequest$maxResults": "The maximum number of results to return.
" + } + }, "TopicRulePayload": { "base": "Describes a rule.
", "refs": { @@ -5904,6 +6212,26 @@ "refs": { } }, + "UpdateTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "UpdateTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "HttpAction$url": "The endpoint URL. If substitution templates are used in the URL, you must also specify a confirmationUrl
. If this is a new destination, a new TopicRuleDestination
is created if possible.
The URL to which AWS IoT sends a confirmation message. The value of the confirmation URL must be a prefix of the endpoint URL. If you do not specify a confirmation URL AWS IoT uses the endpoint URL as the confirmation URL. If you use substitution templates in the confirmationUrl, you must create and enable topic rule destinations that match each possible value of the substituion template before traffic is allowed to your endpoint URL.
", + "HttpUrlDestinationConfiguration$confirmationUrl": "The URL AWS IoT uses to confirm ownership of or access to the topic rule destination URL.
", + "HttpUrlDestinationProperties$confirmationUrl": "The URL used to confirm the HTTP topic rule destination URL.
", + "HttpUrlDestinationSummary$confirmationUrl": "The URL used to confirm ownership of or access to the HTTP topic rule destination URL.
" + } + }, "UseBase64": { "base": null, "refs": { @@ -5946,6 +6274,12 @@ "Parameters$value": null } }, + "Variance": { + "base": null, + "refs": { + "Statistics$variance": "The variance of the aggregated field values.
" + } + }, "Version": { "base": null, "refs": { diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 0621a198e42..70ab245b1c6 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -2061,10 +2061,13 @@ "nodejs6.10", "nodejs8.10", "nodejs10.x", + "nodejs12.x", "java8", + "java11", "python2.7", "python3.6", "python3.7", + "python3.8", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", diff --git a/models/apis/logs/2014-03-28/docs-2.json b/models/apis/logs/2014-03-28/docs-2.json index f05b73045aa..63521292674 100644 --- a/models/apis/logs/2014-03-28/docs-2.json +++ b/models/apis/logs/2014-03-28/docs-2.json @@ -5,7 +5,7 @@ "AssociateKmsKey": "Associates the specified AWS Key Management Service (AWS KMS) customer master key (CMK) with the specified log group.
Associating an AWS KMS CMK with a log group overrides any existing associations between the log group and a CMK. After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.
Note that it can take up to 5 minutes for this operation to take effect.
If you attempt to associate a CMK with a log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException
error.
Cancels the specified export task.
The task must be in the PENDING
or RUNNING
state.
Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets encrypted with SSE-KMS is not supported.
", - "CreateLogGroup": "Creates a log group with the specified name.
You can create up to 5000 log groups per account.
You must use the following guidelines when naming a log group:
Log group names must be unique within a region for an AWS account.
Log group names can be between 1 and 512 characters long.
Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period).
If you associate a AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.
If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException
error.
Creates a log group with the specified name.
You can create up to 20,000 log groups per account.
You must use the following guidelines when naming a log group:
Log group names must be unique within a region for an AWS account.
Log group names can be between 1 and 512 characters long.
Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
If you associate a AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.
If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException
error.
Creates a log stream for the specified log group.
There is no limit on the number of log streams that you can create for a log group.
You must use the following guidelines when naming a log stream:
Log stream names must be unique within the log group.
Log stream names can be between 1 and 512 characters long.
The ':' (colon) and '*' (asterisk) characters are not allowed.
Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.
", "DeleteLogGroup": "Deletes the specified log group and permanently deletes all the archived log events associated with the log group.
", @@ -29,7 +29,7 @@ "GetLogRecord": "Retrieves all the fields and values of a single log event. All fields are retrieved, even if the original query that produced the logRecordPointer
retrieved only a subset of fields. Fields are returned as field name/field value pairs.
Additionally, the entire unparsed log event is returned within @message
.
Returns the results from the specified query.
Only the fields requested in the query are returned, along with a @ptr
field which is the identifier for the log record. You can use the value of @ptr
in a operation to get the full log record.
GetQueryResults
does not start a query execution. To run a query, use .
If the value of the Status
field in the output is Running
, this operation returns only partial results. If you see a value of Scheduled
or Running
for the status, you can retry the operation later to see the final results.
Lists the tags for the specified log group.
", - "PutDestination": "Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents. A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose strea, or an AWS Lambda function.
Through an access policy, a destination controls what is written to it. By default, PutDestination
does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination
.
Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.
A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.
Through an access policy, a destination controls what is written to it. By default, PutDestination
does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination
.
Creates or updates an access policy associated with an existing destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.
", "PutLogEvents": "Uploads a batch of log events to the specified log stream.
You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents
twice within a narrow time period using the same value for sequenceToken
, both calls may be successful, or one may be rejected.
The batch of events must satisfy the following constraints:
The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
None of the log events in the batch can be more than 2 hours in the future.
None of the log events in the batch can be older than 14 days or older than the retention period of the log group.
The log events in the batch must be in chronological ordered by their timestamp. The timestamp is the time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools for PowerShell and the AWS SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)
The maximum number of log events in a batch is 10,000.
A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is an invalid AWS access key ID or secret key.
", "PutMetricFilter": "Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
", @@ -314,7 +314,7 @@ "refs": { "FilterLogEventsRequest$limit": "The maximum number of events to return. The default is 10,000 events.
", "GetLogEventsRequest$limit": "The maximum number of log events returned. If you don't specify a value, the maximum is as many log events as can fit in a response size of 1 MB, up to 10,000 log events.
", - "StartQueryRequest$limit": "The maximum number of log events to return in the query. If the query string uses the fields
command, only the specified fields and their values are returned.
The maximum number of log events to return in the query. If the query string uses the fields
command, only the specified fields and their values are returned. The default is 1000.
The token for the next set of events to return. (You received this token from a previous call.)
", "FilterLogEventsResponse$nextToken": "The token to use when requesting the next set of items. The token expires after 24 hours.
", - "GetLogEventsRequest$nextToken": "The token for the next set of items to return. (You received this token from a previous call.)
", + "GetLogEventsRequest$nextToken": "The token for the next set of items to return. (You received this token from a previous call.)
Using this token works only when you specify true
for startFromHead
.
The token for the next set of items in the forward direction. The token expires after 24 hours. If you have reached the end of the stream, it will return the same token you passed in.
", "GetLogEventsResponse$nextBackwardToken": "The token for the next set of items in the backward direction. The token expires after 24 hours. This token will never be null. If you have reached the end of the stream, it will return the same token you passed in.
" } @@ -1043,7 +1043,7 @@ "base": null, "refs": { "LogGroup$storedBytes": "The number of bytes stored.
", - "LogStream$storedBytes": "The number of bytes stored.
IMPORTANT: Starting on June 17, 2019, this parameter will be deprecated for log streams, and will be reported as zero. This change applies only to log streams. The storedBytes
parameter for log groups is not affected.
The number of bytes stored.
IMPORTANT:On June 17, 2019, this parameter was deprecated for log streams, and is always reported as zero. This change applies only to log streams. The storedBytes
parameter for log groups is not affected.
Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.
For information about how to construct an object lifecycle policy, see Components of an Object Lifecycle Policy.
", "StartAccessLogging": "Starts access logging on the specified container. When you enable access logging on a container, MediaStore delivers access logs for objects stored in that container to Amazon CloudWatch Logs.
", "StopAccessLogging": "Stops access logging on the specified container. When you stop access logging on a container, MediaStore stops sending access logs to Amazon CloudWatch Logs. These access logs are not saved and are not retrievable.
", - "TagResource": "Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each container. You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
", + "TagResource": "Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each container. You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
", "UntagResource": "Removes tags from the specified container. You can specify one or more tags to remove.
" }, "shapes": { @@ -374,7 +374,7 @@ } }, "Tag": { - "base": "A collection of tags associated with a container. Each tag consists of a key:value pair, which can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
", + "base": "A collection of tags associated with a container. Each tag consists of a key:value pair, which can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
", "refs": { "TagList$member": null } @@ -395,7 +395,7 @@ "TagList": { "base": null, "refs": { - "CreateContainerInput$Tags": "An array of key:value pairs that you define. These values can be anything that you want. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
", + "CreateContainerInput$Tags": "An array of key:value pairs that you define. These values can be anything that you want. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
", "ListTagsForResourceOutput$Tags": "An array of key:value pairs that are assigned to the container.
", "TagResourceInput$Tags": "An array of key:value pairs that you want to add to the container. You need to specify only the tags that you want to add or update. For example, suppose a container already has two tags (customer:CompanyA and priority:High). You want to change the priority tag and also add a third tag (type:Contract). For TagResource, you specify the following tags: priority:Medium, type:Contract. The result is that your container has three tags: customer:CompanyA, priority:Medium, and type:Contract.
" } diff --git a/models/apis/meteringmarketplace/2016-01-14/api-2.json b/models/apis/meteringmarketplace/2016-01-14/api-2.json index 1aa1635c974..321b693d018 100644 --- a/models/apis/meteringmarketplace/2016-01-14/api-2.json +++ b/models/apis/meteringmarketplace/2016-01-14/api-2.json @@ -46,7 +46,8 @@ {"shape":"InvalidEndpointRegionException"}, {"shape":"TimestampOutOfBoundsException"}, {"shape":"DuplicateRequestException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"CustomerNotEntitledException"} ] }, "RegisterUsage":{ diff --git a/models/apis/meteringmarketplace/2016-01-14/docs-2.json b/models/apis/meteringmarketplace/2016-01-14/docs-2.json index 56d38c9d0e9..86cfbf42dbb 100644 --- a/models/apis/meteringmarketplace/2016-01-14/docs-2.json +++ b/models/apis/meteringmarketplace/2016-01-14/docs-2.json @@ -1,10 +1,10 @@ { "version": "2.0", - "service": "This reference provides descriptions of the low-level AWS Marketplace Metering Service API.
AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.
Submitting Metering Records
MeterUsage- Submits the metering record for a Marketplace product. MeterUsage is called from an EC2 instance.
BatchMeterUsage- Submits the metering record for a set of customers. BatchMeterUsage is called from a software-as-a-service (SaaS) application.
Accepting New Customers
ResolveCustomer- Called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a Registration Token through the browser. The Registration Token is resolved through this API to obtain a CustomerIdentifier and Product Code.
Entitlement and Metering for Paid Container Products
Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Calling RegisterUsage from containers running outside of Amazon Elastic Container Service (Amazon ECR) isn't supported. Free and BYOL products for ECS aren't required to call RegisterUsage, but you can do so if you want to receive usage data in your seller reports. For more information on using the RegisterUsage operation, see Container-Based Products.
BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide .
", + "service": "This reference provides descriptions of the low-level AWS Marketplace Metering Service API.
AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.
Submitting Metering Records
MeterUsage- Submits the metering record for a Marketplace product. MeterUsage is called from an EC2 instance.
BatchMeterUsage- Submits the metering record for a set of customers. BatchMeterUsage is called from a software-as-a-service (SaaS) application.
Accepting New Customers
ResolveCustomer- Called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a Registration Token through the browser. The Registration Token is resolved through this API to obtain a CustomerIdentifier and Product Code.
Entitlement and Metering for Paid Container Products
Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do so if you want to receive usage data in your seller reports. For more information on using the RegisterUsage operation, see Container-Based Products.
BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide .
", "operations": { "BatchMeterUsage": "BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with the same records or a subset of the input records.
Every request to BatchMeterUsage is for one product. If you need to meter usage for multiple products, you must make multiple calls to BatchMeterUsage.
BatchMeterUsage can process up to 25 UsageRecords at a time.
", "MeterUsage": "API to emit metering records. For identical requests, the API is idempotent. It simply returns the metering record ID.
MeterUsage is authenticated on the buyer's AWS account, generally when running from an EC2 instance on the AWS Marketplace.
", - "RegisterUsage": "Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Calling RegisterUsage from containers running outside of ECS is not currently supported. Free and BYOL products for ECS aren't required to call RegisterUsage, but you may choose to do so if you would like to receive usage data in your seller reports. The sections below explain the behavior of RegisterUsage. RegisterUsage performs two primary functions: metering and entitlement.
Entitlement: RegisterUsage allows you to verify that the customer running your paid software is subscribed to your product on AWS Marketplace, enabling you to guard against unauthorized use. Your container image that integrates with RegisterUsage is only required to guard against unauthorized use at container startup, as such a CustomerNotSubscribedException/PlatformNotSupportedException will only be thrown on the initial call to RegisterUsage. Subsequent calls from the same Amazon ECS task instance (e.g. task-id) will not throw a CustomerNotSubscribedException, even if the customer unsubscribes while the Amazon ECS task is still running.
Metering: RegisterUsage meters software use per ECS task, per hour, with usage prorated to the second. A minimum of 1 minute of usage applies to tasks that are short lived. For example, if a customer has a 10 node ECS cluster and creates an ECS service configured as a Daemon Set, then ECS will launch a task on all 10 cluster nodes and the customer will be charged: (10 * hourly_rate). Metering for software use is automatically handled by the AWS Marketplace Metering Control Plane -- your software is not required to perform any metering specific actions, other than call RegisterUsage once for metering of software use to commence. The AWS Marketplace Metering Control Plane will also continue to bill customers for running ECS tasks, regardless of the customers subscription state, removing the need for your software to perform entitlement checks at runtime.
Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you may choose to do so if you would like to receive usage data in your seller reports. The sections below explain the behavior of RegisterUsage. RegisterUsage performs two primary functions: metering and entitlement.
Entitlement: RegisterUsage allows you to verify that the customer running your paid software is subscribed to your product on AWS Marketplace, enabling you to guard against unauthorized use. Your container image that integrates with RegisterUsage is only required to guard against unauthorized use at container startup, as such a CustomerNotSubscribedException/PlatformNotSupportedException will only be thrown on the initial call to RegisterUsage. Subsequent calls from the same Amazon ECS task instance (e.g. task-id) or Amazon EKS pod will not throw a CustomerNotSubscribedException, even if the customer unsubscribes while the Amazon ECS task or Amazon EKS pod is still running.
Metering: RegisterUsage meters software use per ECS task, per hour, or per pod for Amazon EKS with usage prorated to the second. A minimum of 1 minute of usage applies to tasks that are short lived. For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a service configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch a task on all 10 cluster nodes and the customer will be charged: (10 * hourly_rate). Metering for software use is automatically handled by the AWS Marketplace Metering Control Plane -- your software is not required to perform any metering specific actions, other than call RegisterUsage once for metering of software use to commence. The AWS Marketplace Metering Control Plane will also continue to bill customers for running ECS tasks and Amazon EKS pods, regardless of the customers subscription state, removing the need for your software to perform entitlement checks at runtime.
ResolveCustomer is called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a registration token through their browser. The registration token is resolved through this API to obtain a CustomerIdentifier and product code.
" }, "shapes": { diff --git a/models/apis/migrationhub-config/2019-06-30/api-2.json b/models/apis/migrationhub-config/2019-06-30/api-2.json new file mode 100644 index 00000000000..f2ea8f956b7 --- /dev/null +++ b/models/apis/migrationhub-config/2019-06-30/api-2.json @@ -0,0 +1,207 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-06-30", + "endpointPrefix":"migrationhub-config", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Migration Hub Config", + "serviceId":"MigrationHub Config", + "signatureVersion":"v4", + "signingName":"mgh", + "targetPrefix":"AWSMigrationHubMultiAccountService", + "uid":"migrationhub-config-2019-06-30" + }, + "operations":{ + "CreateHomeRegionControl":{ + "name":"CreateHomeRegionControl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHomeRegionControlRequest"}, + "output":{"shape":"CreateHomeRegionControlResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DryRunOperation"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeHomeRegionControls":{ + "name":"DescribeHomeRegionControls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHomeRegionControlsRequest"}, + "output":{"shape":"DescribeHomeRegionControlsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"} + ] + }, + "GetHomeRegion":{ + "name":"GetHomeRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHomeRegionRequest"}, + "output":{"shape":"GetHomeRegionResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + 
"Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ControlId":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^hrc-[a-z0-9]{12}$" + }, + "CreateHomeRegionControlRequest":{ + "type":"structure", + "required":[ + "HomeRegion", + "Target" + ], + "members":{ + "HomeRegion":{"shape":"HomeRegion"}, + "Target":{"shape":"Target"}, + "DryRun":{"shape":"DryRun"} + } + }, + "CreateHomeRegionControlResult":{ + "type":"structure", + "members":{ + "HomeRegionControl":{"shape":"HomeRegionControl"} + } + }, + "DescribeHomeRegionControlsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "DescribeHomeRegionControlsRequest":{ + "type":"structure", + "members":{ + "ControlId":{"shape":"ControlId"}, + "HomeRegion":{"shape":"HomeRegion"}, + "Target":{"shape":"Target"}, + "MaxResults":{"shape":"DescribeHomeRegionControlsMaxResults"}, + "NextToken":{"shape":"Token"} + } + }, + "DescribeHomeRegionControlsResult":{ + "type":"structure", + "members":{ + "HomeRegionControls":{"shape":"HomeRegionControls"}, + "NextToken":{"shape":"Token"} + } + }, + "DryRun":{"type":"boolean"}, + "DryRunOperation":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "GetHomeRegionRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHomeRegionResult":{ + "type":"structure", + "members":{ + "HomeRegion":{"shape":"HomeRegion"} + } + }, + "HomeRegion":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^([a-z]+)-([a-z]+)-([0-9]+)$" + }, + "HomeRegionControl":{ + "type":"structure", + "members":{ + "ControlId":{"shape":"ControlId"}, + "HomeRegion":{"shape":"HomeRegion"}, + "Target":{"shape":"Target"}, + "RequestedTime":{"shape":"RequestedTime"} + } + }, + "HomeRegionControls":{ + "type":"list", + "member":{"shape":"HomeRegionControl"}, + "max":100 + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + 
"exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "RequestedTime":{"type":"timestamp"}, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "Target":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"TargetType"}, + "Id":{"shape":"TargetId"} + } + }, + "TargetId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "TargetType":{ + "type":"string", + "enum":["ACCOUNT"] + }, + "Token":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" + } + } +} diff --git a/models/apis/migrationhub-config/2019-06-30/docs-2.json b/models/apis/migrationhub-config/2019-06-30/docs-2.json new file mode 100644 index 00000000000..e6e91ce1d19 --- /dev/null +++ b/models/apis/migrationhub-config/2019-06-30/docs-2.json @@ -0,0 +1,150 @@ +{ + "version": "2.0", + "service": "The AWS Migration Hub home region APIs are available specifically for working with your Migration Hub home region. You can use these APIs to determine a home region, as well as to create and work with controls that describe the home region.
You can use these APIs within your home region only. If you call these APIs from outside your home region, your calls are rejected, except for the ability to register your agents and connectors.
You must call GetHomeRegion
at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.
The StartDataCollection
API call in AWS Application Discovery Service allows your agents and connectors to begin collecting data that flows directly into the home region, and it will prevent you from enabling data collection information to be sent outside the home region.
For specific API usage, see the sections that follow in this AWS Migration Hub Home Region API reference.
The Migration Hub Home Region APIs do not support AWS Organizations.
This API sets up the home region for the calling account only.
", + "DescribeHomeRegionControls": "This API permits filtering on the ControlId
, HomeRegion
, and RegionControlScope
fields.
Returns the calling account’s home region, if configured. This API is used by other AWS services to determine the regional endpoint for calling AWS Application Discovery Service and Migration Hub. You must call GetHomeRegion
at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.
You do not have sufficient access to perform this action.
", + "refs": { + } + }, + "ControlId": { + "base": null, + "refs": { + "DescribeHomeRegionControlsRequest$ControlId": "The ControlID
is a unique identifier string of your HomeRegionControl
object.
A unique identifier that's generated for each home region control. It's always a string that begins with \"hrc-\" followed by 12 lowercase letters and numbers.
" + } + }, + "CreateHomeRegionControlRequest": { + "base": null, + "refs": { + } + }, + "CreateHomeRegionControlResult": { + "base": null, + "refs": { + } + }, + "DescribeHomeRegionControlsMaxResults": { + "base": null, + "refs": { + "DescribeHomeRegionControlsRequest$MaxResults": "The maximum number of filtering results to display per page.
" + } + }, + "DescribeHomeRegionControlsRequest": { + "base": null, + "refs": { + } + }, + "DescribeHomeRegionControlsResult": { + "base": null, + "refs": { + } + }, + "DryRun": { + "base": null, + "refs": { + "CreateHomeRegionControlRequest$DryRun": "Optional Boolean flag to indicate whether any effect should take place. It tests whether the caller has permission to make the call.
" + } + }, + "DryRunOperation": { + "base": "Exception raised to indicate that authorization of an action was successful, when the DryRun
flag is set to true.
The name of the home region of the calling account.
", + "DescribeHomeRegionControlsRequest$HomeRegion": "The name of the home region you'd like to view.
", + "GetHomeRegionResult$HomeRegion": "The name of the home region of the calling account.
", + "HomeRegionControl$HomeRegion": "The AWS Region that's been set as home region. For example, \"us-west-2\" or \"eu-central-1\" are valid home regions.
" + } + }, + "HomeRegionControl": { + "base": "A home region control is an object that specifies the home region for an account, with some additional information. It contains a target (always of type ACCOUNT
), an ID, and a time at which the home region was set.
This object is the HomeRegionControl
object that's returned by a successful call to CreateHomeRegionControl
.
An array that contains your HomeRegionControl
objects.
Exception raised when an internal, configuration, or dependency error is encountered.
", + "refs": { + } + }, + "InvalidInputException": { + "base": "Exception raised when the provided input violates a policy constraint or is entered in the wrong format or data type.
", + "refs": { + } + }, + "RequestedTime": { + "base": null, + "refs": { + "HomeRegionControl$RequestedTime": "A timestamp representing the time when the customer called CreateHomeregionControl
and set the home region for the account.
Exception raised when a request fails due to temporary unavailability of the service.
", + "refs": { + } + }, + "Target": { + "base": "The target parameter specifies the identifier to which the home region is applied, which is always an ACCOUNT
. It applies the home region to the current ACCOUNT
.
The account for which this command sets up a home region control. The Target
is always of type ACCOUNT
.
The target parameter specifies the identifier to which the home region is applied, which is always of type ACCOUNT
. It applies the home region to the current ACCOUNT
.
The target parameter specifies the identifier to which the home region is applied, which is always an ACCOUNT
. It applies the home region to the current ACCOUNT
.
The TargetID
is a 12-character identifier of the ACCOUNT
for which the control was created. (This must be the current account.)
The target type is always an ACCOUNT
.
If a NextToken
was returned by a previous call, more results are available. To retrieve the next page of results, make the call again using the returned token in NextToken
.
If a NextToken
was returned by a previous call, more results are available. To retrieve the next page of results, make the call again using the returned token in NextToken
.
Amazon Personalize is a machine learning service that makes it easy to add individualized recommendations to customers.
", "operations": { + "CreateBatchInferenceJob": "Creates a batch inference job. The operation can handle up to 50 million records and the input file must be in JSON format. For more information, see recommendations-batch.
", "CreateCampaign": "Creates a campaign by deploying a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.
Minimum Provisioned TPS and Auto-Scaling
A transaction is a single GetRecommendations
or GetPersonalizedRanking
call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS
) specifies the baseline throughput provisioned by Amazon Personalize, and thus, the minimum billing charge. If your TPS increases beyond minProvisionedTPS
, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS
, to maintain a 70% utilization. There's a short time delay while the capacity is increased that might cause loss of transactions. It's recommended to start with a low minProvisionedTPS
, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS
as necessary.
Status
A campaign can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the campaign status, call DescribeCampaign.
Wait until the status
of the campaign is ACTIVE
before asking the campaign for recommendations.
Related APIs
", "CreateDataset": "Creates an empty dataset and adds it to the specified dataset group. Use CreateDatasetImportJob to import your training data to a dataset.
There are three types of datasets:
Interactions
Items
Users
Each dataset type has an associated schema with required field types. Only the Interactions
dataset is required in order to train a model (also referred to as creating a solution).
A dataset can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the status of the dataset, call DescribeDataset.
Related APIs
", "CreateDatasetGroup": "Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:
Interactions
Items
Users
To train a model (create a solution), a dataset group that contains an Interactions
dataset is required. Call CreateDataset to add a dataset to the group.
A dataset group can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING
To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the creation failed.
You must wait until the status
of the dataset group is ACTIVE
before adding a dataset to the group.
You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an AWS Identity and Access Management (IAM) role that has permission to access the key.
APIs that require a dataset group ARN in the request
Related APIs
", @@ -17,6 +18,7 @@ "DeleteSchema": "Deletes a schema. Before deleting a schema, you must delete all datasets referencing the schema. For more information on schemas, see CreateSchema.
", "DeleteSolution": "Deletes all versions of a solution and the Solution
object itself. Before deleting a solution, you must delete all campaigns based on the solution. To determine what campaigns are using the solution, call ListCampaigns and supply the Amazon Resource Name (ARN) of the solution. You can't delete a solution if an associated SolutionVersion
is in the CREATE PENDING or IN PROGRESS state. For more information on solutions, see CreateSolution.
Describes the given algorithm.
", + "DescribeBatchInferenceJob": "Gets the properties of a batch inference job including name, Amazon Resource Name (ARN), status, input and output configurations, and the ARN of the solution version used to generate the recommendations.
", "DescribeCampaign": "Describes the given campaign, including its status.
A campaign can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
When the status
is CREATE FAILED
, the response includes the failureReason
key, which describes why.
For more information on campaigns, see CreateCampaign.
", "DescribeDataset": "Describes the given dataset. For more information on datasets, see CreateDataset.
", "DescribeDatasetGroup": "Describes the given dataset group. For more information on dataset groups, see CreateDatasetGroup.
", @@ -28,6 +30,7 @@ "DescribeSolution": "Describes a solution. For more information on solutions, see CreateSolution.
", "DescribeSolutionVersion": "Describes a specific version of a solution. For more information on solutions, see CreateSolution.
", "GetSolutionMetrics": "Gets the metrics for the specified solution version.
", + "ListBatchInferenceJobs": "Gets a list of the batch inference jobs that have been performed off of a solution version.
", "ListCampaigns": "Returns a list of campaigns that use the given solution. When a solution is not specified, all the campaigns associated with the account are listed. The response provides the properties for each campaign, including the Amazon Resource Name (ARN). For more information on campaigns, see CreateCampaign.
", "ListDatasetGroups": "Returns a list of dataset groups. The response provides the properties for each dataset group, including the Amazon Resource Name (ARN). For more information on dataset groups, see CreateDatasetGroup.
", "ListDatasetImportJobs": "Returns a list of dataset import jobs that use the given dataset. When a dataset is not specified, all the dataset import jobs associated with the account are listed. The response provides the properties for each dataset import job, including the Amazon Resource Name (ARN). For more information on dataset import jobs, see CreateDatasetImportJob. For more information on datasets, see CreateDataset.
", @@ -65,10 +68,15 @@ "Algorithm$roleArn": "The Amazon Resource Name (ARN) of the role.
", "ArnList$member": null, "AutoMLResult$bestRecipeArn": "The Amazon Resource Name (ARN) of the best recipe.
", + "BatchInferenceJob$batchInferenceJobArn": "The Amazon Resource Name (ARN) of the batch inference job.
", + "BatchInferenceJob$solutionVersionArn": "The Amazon Resource Name (ARN) of the solution version from which the batch inference job was created.
", + "BatchInferenceJobSummary$batchInferenceJobArn": "The Amazon Resource Name (ARN) of the batch inference job.
", "Campaign$campaignArn": "The Amazon Resource Name (ARN) of the campaign.
", "Campaign$solutionVersionArn": "The Amazon Resource Name (ARN) of a specific version of the solution.
", "CampaignSummary$campaignArn": "The Amazon Resource Name (ARN) of the campaign.
", "CampaignUpdateSummary$solutionVersionArn": "The Amazon Resource Name (ARN) of the deployed solution version.
", + "CreateBatchInferenceJobRequest$solutionVersionArn": "The Amazon Resource Name (ARN) of the solution version that will be used to generate the batch inference recommendations.
", + "CreateBatchInferenceJobResponse$batchInferenceJobArn": "The ARN of the batch inference job.
", "CreateCampaignRequest$solutionVersionArn": "The Amazon Resource Name (ARN) of the solution version to deploy.
", "CreateCampaignResponse$campaignArn": "The Amazon Resource Name (ARN) of the campaign.
", "CreateDatasetGroupResponse$datasetGroupArn": "The Amazon Resource Name (ARN) of the new dataset group.
", @@ -104,6 +112,7 @@ "DeleteSchemaRequest$schemaArn": "The Amazon Resource Name (ARN) of the schema to delete.
", "DeleteSolutionRequest$solutionArn": "The ARN of the solution to delete.
", "DescribeAlgorithmRequest$algorithmArn": "The Amazon Resource Name (ARN) of the algorithm to describe.
", + "DescribeBatchInferenceJobRequest$batchInferenceJobArn": "The ARN of the batch inference job to describe.
", "DescribeCampaignRequest$campaignArn": "The Amazon Resource Name (ARN) of the campaign.
", "DescribeDatasetGroupRequest$datasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group to describe.
", "DescribeDatasetImportJobRequest$datasetImportJobArn": "The Amazon Resource Name (ARN) of the dataset import job to describe.
", @@ -120,6 +129,7 @@ "FeatureTransformation$featureTransformationArn": "The Amazon Resource Name (ARN) of the FeatureTransformation object.
", "GetSolutionMetricsRequest$solutionVersionArn": "The Amazon Resource Name (ARN) of the solution version for which to get metrics.
", "GetSolutionMetricsResponse$solutionVersionArn": "The same solution version ARN as specified in the request.
", + "ListBatchInferenceJobsRequest$solutionVersionArn": "The Amazon Resource Name (ARN) of the solution version from which the batch inference jobs were created.
", "ListCampaignsRequest$solutionArn": "The Amazon Resource Name (ARN) of the solution to list the campaigns for. When a solution is not specified, all the campaigns associated with the account are listed.
", "ListDatasetImportJobsRequest$datasetArn": "The Amazon Resource Name (ARN) of the dataset to list the dataset import jobs for.
", "ListDatasetsRequest$datasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group that contains the datasets to list.
", @@ -169,6 +179,38 @@ "DatasetSchema$schema": "The schema.
" } }, + "BatchInferenceJob": { + "base": "Contains information on a batch inference job.
", + "refs": { + "DescribeBatchInferenceJobResponse$batchInferenceJob": "Information on the specified batch inference job.
" + } + }, + "BatchInferenceJobInput": { + "base": "The input configuration of a batch inference job.
", + "refs": { + "BatchInferenceJob$jobInput": "The Amazon S3 path that leads to the input data used to generate the batch inference job.
", + "CreateBatchInferenceJobRequest$jobInput": "The Amazon S3 path that leads to the input file to base your recommendations on. The input material must be in JSON format.
" + } + }, + "BatchInferenceJobOutput": { + "base": "The output configuration parameters of a batch inference job.
", + "refs": { + "BatchInferenceJob$jobOutput": "The Amazon S3 bucket that contains the output data generated by the batch inference job.
", + "CreateBatchInferenceJobRequest$jobOutput": "The path to the Amazon S3 bucket where the job's output will be stored.
" + } + }, + "BatchInferenceJobSummary": { + "base": "A truncated version of the BatchInferenceJob datatype. The ListBatchInferenceJobs operation returns a list of batch inference job summaries.
", + "refs": { + "BatchInferenceJobs$member": null + } + }, + "BatchInferenceJobs": { + "base": null, + "refs": { + "ListBatchInferenceJobsResponse$batchInferenceJobs": "A list containing information on each job that is returned.
" + } + }, "Boolean": { "base": null, "refs": { @@ -250,6 +292,16 @@ "DefaultContinuousHyperParameterRange$minValue": "The minimum allowable value for the hyperparameter.
" } }, + "CreateBatchInferenceJobRequest": { + "base": null, + "refs": { + } + }, + "CreateBatchInferenceJobResponse": { + "base": null, + "refs": { + } + }, "CreateCampaignRequest": { "base": null, "refs": { @@ -416,6 +468,10 @@ "refs": { "Algorithm$creationDateTime": "The date and time (in Unix time) that the algorithm was created.
", "Algorithm$lastUpdatedDateTime": "The date and time (in Unix time) that the algorithm was last updated.
", + "BatchInferenceJob$creationDateTime": "The time at which the batch inference job was created.
", + "BatchInferenceJob$lastUpdatedDateTime": "The time at which the batch inference job was last updated.
", + "BatchInferenceJobSummary$creationDateTime": "The time at which the batch inference job was created.
", + "BatchInferenceJobSummary$lastUpdatedDateTime": "The time at which the batch inference job was last updated.
", "Campaign$creationDateTime": "The date and time (in Unix format) that the campaign was created.
", "Campaign$lastUpdatedDateTime": "The date and time (in Unix format) that the campaign was last updated.
", "CampaignSummary$creationDateTime": "The date and time (in Unix time) that the campaign was created.
", @@ -540,6 +596,16 @@ "refs": { } }, + "DescribeBatchInferenceJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeBatchInferenceJobResponse": { + "base": null, + "refs": { + } + }, "DescribeCampaignRequest": { "base": null, "refs": { @@ -698,6 +764,8 @@ "FailureReason": { "base": null, "refs": { + "BatchInferenceJob$failureReason": "If the batch inference job failed, the reason for the failure.
", + "BatchInferenceJobSummary$failureReason": "If the batch inference job failed, the reason for the failure.
", "Campaign$failureReason": "If a campaign fails, the reason behind the failure.
", "CampaignSummary$failureReason": "If a campaign fails, the reason behind the failure.
", "CampaignUpdateSummary$failureReason": "If a campaign update fails, the reason behind the failure.
", @@ -740,7 +808,7 @@ "HPOConfig": { "base": "Describes the properties for hyperparameter optimization (HPO). For use with the bring-your-own-recipe feature. Do not use for Amazon Personalize native recipes.
", "refs": { - "SolutionConfig$hpoConfig": "Describes the properties for hyperparameter optimization (HPO). For use with the bring-your-own-recipe feature. Not used with Amazon Personalize predefined recipes.
" + "SolutionConfig$hpoConfig": "Describes the properties for hyperparameter optimization (HPO).
" } }, "HPOObjective": { @@ -821,7 +889,8 @@ "base": null, "refs": { "CreateDatasetGroupRequest$kmsKeyArn": "The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets.
", - "DatasetGroup$kmsKeyArn": "The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.
" + "DatasetGroup$kmsKeyArn": "The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.
", + "S3DataConfig$kmsKeyArn": "The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.
" } }, "LimitExceededException": { @@ -829,6 +898,16 @@ "refs": { } }, + "ListBatchInferenceJobsRequest": { + "base": null, + "refs": { + } + }, + "ListBatchInferenceJobsResponse": { + "base": null, + "refs": { + } + }, "ListCampaignsRequest": { "base": null, "refs": { @@ -922,6 +1001,7 @@ "MaxResults": { "base": null, "refs": { + "ListBatchInferenceJobsRequest$maxResults": "The maximum number of batch inference job results to return in each page. The default value is 100.
", "ListCampaignsRequest$maxResults": "The maximum number of campaigns to return.
", "ListDatasetGroupsRequest$maxResults": "The maximum number of dataset groups to return.
", "ListDatasetImportJobsRequest$maxResults": "The maximum number of dataset import jobs to return.
", @@ -964,8 +1044,11 @@ "refs": { "Algorithm$name": "The name of the algorithm.
", "AlgorithmImage$name": "The name of the algorithm image.
", + "BatchInferenceJob$jobName": "The name of the batch inference job.
", + "BatchInferenceJobSummary$jobName": "The name of the batch inference job.
", "Campaign$name": "The name of the campaign.
", "CampaignSummary$name": "The name of the campaign.
", + "CreateBatchInferenceJobRequest$jobName": "The name of the batch inference job to create.
", "CreateCampaignRequest$name": "A name for the new campaign. The campaign name must be unique within your account.
", "CreateDatasetGroupRequest$name": "The name for the new dataset group.
", "CreateDatasetImportJobRequest$jobName": "The name for the dataset import job.
", @@ -993,6 +1076,8 @@ "NextToken": { "base": null, "refs": { + "ListBatchInferenceJobsRequest$nextToken": "The token to request the next page of results.
", + "ListBatchInferenceJobsResponse$nextToken": "The token to use to retreive the next page of results. The value is null
when there are no more results to return.
A token returned from the previous call to ListCampaigns
for getting the next set of campaigns (if they exist).
A token for getting the next set of campaigns (if they exist).
", "ListDatasetGroupsRequest$nextToken": "A token returned from the previous call to ListDatasetGroups
for getting the next set of dataset groups (if they exist).
A token for getting the next set of solutions (if they exist).
" } }, + "NumBatchResults": { + "base": null, + "refs": { + "BatchInferenceJob$numResults": "The number of recommendations generated by the batch inference job. This number includes the error messages generated for failed input records.
", + "CreateBatchInferenceJobRequest$numResults": "The number of recommendations to retreive.
" + } + }, "ParameterName": { "base": null, "refs": { @@ -1106,15 +1198,25 @@ "RoleArn": { "base": null, "refs": { + "BatchInferenceJob$roleArn": "The ARN of the Amazon Identity and Access Management (IAM) role that requested the batch inference job.
", + "CreateBatchInferenceJobRequest$roleArn": "The ARN of the Amazon Identity and Access Management role that has permissions to read and write to your input and out Amazon S3 buckets respectively.
", "CreateDatasetGroupRequest$roleArn": "The ARN of the IAM role that has permissions to access the KMS key. Supplying an IAM role is only valid when also specifying a KMS key.
", "CreateDatasetImportJobRequest$roleArn": "The ARN of the IAM role that has permissions to read from the Amazon S3 data source.
", "DatasetGroup$roleArn": "The ARN of the IAM role that has permissions to create the dataset group.
" } }, + "S3DataConfig": { + "base": "The configuration details of an Amazon S3 input or output bucket.
", + "refs": { + "BatchInferenceJobInput$s3DataSource": "The URI of the Amazon S3 location that contains your input data. The Amazon S3 bucket must be in the same region as the API endpoint you are calling.
", + "BatchInferenceJobOutput$s3DataDestination": "Information on the Amazon S3 bucket in which the batch inference job's output is stored.
" + } + }, "S3Location": { "base": null, "refs": { - "DataSource$dataLocation": "The path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For example:
s3://bucket-name/training-data.csv
The path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For example:
s3://bucket-name/training-data.csv
The file path of the Amazon S3 bucket.
" } }, "Schemas": { @@ -1171,6 +1273,8 @@ "Status": { "base": null, "refs": { + "BatchInferenceJob$status": "The status of the batch inference job. The status is one of the following values:
PENDING
IN PROGRESS
ACTIVE
CREATE FAILED
The status of the batch inference job. The status is one of the following values:
PENDING
IN PROGRESS
ACTIVE
CREATE FAILED
The status of the campaign.
A campaign can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the campaign.
A campaign can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the campaign update.
A campaign update can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
Creates a message template that you can use in messages that are sent through a push notification channel.
", "CreateSegment" : "Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application.
", "CreateSmsTemplate" : "Creates a message template that you can use in messages that are sent through the SMS channel.
", + "CreateVoiceTemplate" : "Creates a message template that you can use in messages that are sent through the voice channel.
", "DeleteAdmChannel" : "Disables the ADM channel for an application and deletes any existing settings for the channel.
", "DeleteApnsChannel" : "Disables the APNs channel for an application and deletes any existing settings for the channel.
", "DeleteApnsSandboxChannel" : "Disables the APNs sandbox channel for an application and deletes any existing settings for the channel.
", @@ -31,6 +32,7 @@ "DeleteSmsTemplate" : "Deletes a message template that was designed for use in messages that were sent through the SMS channel.
", "DeleteUserEndpoints" : "Deletes all the endpoints that are associated with a specific user ID.
", "DeleteVoiceChannel" : "Disables the voice channel for an application and deletes any existing settings for the channel.
", + "DeleteVoiceTemplate" : "Deletes a message template that was designed for use in messages that were sent through the voice channel.
", "GetAdmChannel" : "Retrieves information about the status and settings of the ADM channel for an application.
", "GetApnsChannel" : "Retrieves information about the status and settings of the APNs channel for an application.
", "GetApnsSandboxChannel" : "Retrieves information about the status and settings of the APNs sandbox channel for an application.
", @@ -72,6 +74,7 @@ "GetSmsTemplate" : "Retrieves the content and settings for a message template that you can use in messages that are sent through the SMS channel.
", "GetUserEndpoints" : "Retrieves information about all the endpoints that are associated with a specific user ID.
", "GetVoiceChannel" : "Retrieves information about the status and settings of the voice channel for an application.
", + "GetVoiceTemplate" : "Retrieves the content and settings for a message template that you can use in messages that are sent through the voice channel.
", "ListJourneys" : "Retrieves information about the status, configuration, and other settings for all the journeys that are associated with an application.
", "ListTagsForResource" : "Retrieves all the tags (keys and values) that are associated with an application, campaign, journey, message template, or segment.
", "ListTemplates" : "Retrieves information about all the message templates that are associated with your Amazon Pinpoint account.
", @@ -102,7 +105,8 @@ "UpdateSegment" : "Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application.
", "UpdateSmsChannel" : "Enables the SMS channel for an application or updates the status and settings of the SMS channel for an application.
", "UpdateSmsTemplate" : "Updates an existing message template that you can use in messages that are sent through the SMS channel.
", - "UpdateVoiceChannel" : "Enables the voice channel for an application or updates the status and settings of the voice channel for an application.
" + "UpdateVoiceChannel" : "Enables the voice channel for an application or updates the status and settings of the voice channel for an application.
", + "UpdateVoiceTemplate" : "Updates an existing message template that you can use in messages that are sent through the voice channel.
" }, "shapes" : { "ADMChannelRequest" : { @@ -376,9 +380,9 @@ "refs" : { } }, "DefaultMessage" : { - "base" : "Specifies the default message to use for all channels.
", + "base" : "Specifies the default message for all channels.
", "refs" : { - "DirectMessageConfiguration$DefaultMessage" : "The default message body for all channels.
" + "DirectMessageConfiguration$DefaultMessage" : "The default message for all channels.
" } }, "DefaultPushNotificationMessage" : { @@ -410,8 +414,8 @@ "DirectMessageConfiguration" : { "base" : "Specifies the settings and content for the default message and any default messages that you tailored for specific channels.
", "refs" : { - "MessageRequest$MessageConfiguration" : "The set of properties that defines the configuration settings for the message.
", - "SendUsersMessageRequest$MessageConfiguration" : "The message definitions for the default message and any default messages that you defined for specific channels.
" + "MessageRequest$MessageConfiguration" : "The settings and content for the default message and any default messages that you defined for specific channels.
", + "SendUsersMessageRequest$MessageConfiguration" : "The settings and content for the default message and any default messages that you defined for specific channels.
" } }, "Duration" : { @@ -749,7 +753,7 @@ } }, "MessageRequest" : { - "base" : "Specifies the objects that define configuration and other settings for a message.
", + "base" : "Specifies the configuration and other settings for a message.
", "refs" : { } }, "MessageResponse" : { @@ -1030,9 +1034,9 @@ "SimpleEmailPart" : { "base" : "Specifies the subject or body of an email message, represented as textual email data and the applicable character set.
", "refs" : { - "SimpleEmail$HtmlPart" : "The body of the email message, in HTML format. We recommend using an HTML part for email clients that support HTML. You can include links, formatted text, and more in an HTML message.
", + "SimpleEmail$HtmlPart" : "The body of the email message, in HTML format. We recommend using HTML format for email clients that render HTML content. You can include links, formatted text, and more in an HTML message.
", "SimpleEmail$Subject" : "The subject line, or title, of the email.
", - "SimpleEmail$TextPart" : "The body of the email message, in text format. We recommend using a text part for email clients that don't support HTML and clients that are connected to high-latency networks, such as mobile devices.
" + "SimpleEmail$TextPart" : "The body of the email message, in plain text format. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices.
" } }, "SourceType" : { @@ -1065,11 +1069,12 @@ "refs" : { "TemplateConfiguration$EmailTemplate" : "The email template to use for the message.
", "TemplateConfiguration$PushTemplate" : "The push notification template to use for the message.
", - "TemplateConfiguration$SMSTemplate" : "The SMS template to use for the message.
" + "TemplateConfiguration$SMSTemplate" : "The SMS template to use for the message.
", + "TemplateConfiguration$VoiceTemplate" : "The voice template to use for the message.
" } }, "TemplateConfiguration" : { - "base" : "Specifies the message template for each type of channel.
", + "base" : "Specifies the message template to use for the message, for each type of channel.
", "refs" : { "CampaignResponse$TemplateConfiguration" : "The message template that’s used for the campaign.
", "MessageRequest$TemplateConfiguration" : "The message template to use for the message.
", @@ -1091,7 +1096,8 @@ "EmailTemplateResponse$TemplateType" : "The type of channel that the message template is designed for. For an email template, this value is EMAIL.
", "PushNotificationTemplateResponse$TemplateType" : "The type of channel that the message template is designed for. For a push notification template, this value is PUSH.
", "SMSTemplateResponse$TemplateType" : "The type of channel that the message template is designed for. For an SMS template, this value is SMS.
", - "TemplateResponse$TemplateType" : "The type of channel that the message template is designed for.
" + "TemplateResponse$TemplateType" : "The type of channel that the message template is designed for.
", + "VoiceTemplateResponse$TemplateType" : "The type of channel that the message template is designed for. For a voice template, this value is VOICE.
" } }, "TemplatesResponse" : { @@ -1132,6 +1138,14 @@ "DirectMessageConfiguration$VoiceMessage" : "The default message for the voice channel. This message overrides the default message (DefaultMessage).
" } }, + "VoiceTemplateRequest" : { + "base" : "Specifies the content and settings for a message template that can be used in messages that are sent through the voice channel.
", + "refs" : { } + }, + "VoiceTemplateResponse" : { + "base" : "Provides information about the content and settings for a message template that can be used in messages that are sent through the voice channel.
", + "refs" : { } + }, "WaitActivity" : { "base" : "Specifies the settings for a wait activity in a journey. This type of activity waits for a certain amount of time or until a specific date and time before moving participants to the next activity in a journey.
", "refs" : { @@ -1551,9 +1565,9 @@ "refs" : { "ADMMessage$Substitutions" : "The default message variables to use in the notification message. You can override the default variables with individual address variables.
", "APNSMessage$Substitutions" : "The default message variables to use in the notification message. You can override these default variables with individual address variables.
", - "AddressConfiguration$Substitutions" : "An object that maps variable values for the message. Amazon Pinpoint merges these values with the variable values specified by properties of the DefaultMessage object. The substitutions in this map take precedence over all other substitutions.
", + "AddressConfiguration$Substitutions" : "A map of the message variables to merge with the variables specified by properties of the DefaultMessage object. The variables specified in this map take precedence over all other variables.
", "BaiduMessage$Substitutions" : "The default message variables to use in the notification message. You can override the default variables with individual address variables.
", - "DefaultMessage$Substitutions" : "The default message variables to use in the push notification, email, or SMS message. You can override these default variables with individual address variables.
", + "DefaultMessage$Substitutions" : "The default message variables to use in the message. You can override these default variables with individual address variables.
", "DefaultPushNotificationMessage$Substitutions" : "The default message variables to use in the notification message. You can override the default variables with individual address variables.
", "EmailMessage$Substitutions" : "The default message variables to use in the email message. You can override the default variables with individual address variables.
", "EndpointBatchItem$Attributes" : "One or more custom attributes that describe the endpoint by associating a name with an array of values. For example, the value of a custom attribute named Interests might be: [\"science\", \"music\", \"travel\"]. You can use these attributes as filter criteria when you create segments.
When you define the name of a custom attribute, avoid using the following characters: number sign (#), colon (:), question mark (?), backslash (\\), and slash (/). The Amazon Pinpoint console can't display attribute names that contain these characters. This limitation doesn't apply to attribute values.
", @@ -1601,6 +1615,8 @@ "SendUsersMessageRequest$Context" : "A map of custom attribute-value pairs. For a push notification, Amazon Pinpoint adds these attributes to the data.pinpoint object in the body of the notification payload. Amazon Pinpoint also provides these attributes in the events that it generates for users-messages deliveries.
", "TagsModel$tags" : "A string-to-string map of key-value pairs that defines the tags for an application, campaign, journey, message template, or segment. Each of these resources can have a maximum of 50 tags.
Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
", "TemplateResponse$tags" : "A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.
", + "VoiceTemplateRequest$tags" : "A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.
", + "VoiceTemplateResponse$tags" : "A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.
", "WriteCampaignRequest$tags" : "A string-to-string map of key-value pairs that defines the tags to associate with the campaign. Each tag consists of a required tag key and an associated tag value.
", "WriteSegmentRequest$tags" : "A string-to-string map of key-value pairs that defines the tags to associate with the segment. Each tag consists of a required tag key and an associated tag value.
" } @@ -1623,7 +1639,7 @@ "ADMMessage$ImageIconUrl" : "The URL of the large icon image to display in the content view of the push notification.
", "ADMMessage$ImageUrl" : "The URL of an image to display in the push notification.
", "ADMMessage$MD5" : "The base64-encoded, MD5 checksum of the value specified by the Data property. ADM uses the MD5 value to verify the integrity of the data.
", - "ADMMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.
", + "ADMMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.
", "ADMMessage$SmallImageIconUrl" : "The URL of the small icon image to display in the status bar and the content view of the push notification.
", "ADMMessage$Sound" : "The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.
", "ADMMessage$Title" : "The title to display above the notification message on the recipient's device.
", @@ -1649,13 +1665,14 @@ "APNSMessage$MediaUrl" : "The URL of an image or video to display in the push notification.
", "APNSMessage$PreferredAuthenticationMethod" : "The authentication method that you want Amazon Pinpoint to use when authenticating with APNs, CERTIFICATE or TOKEN.
", "APNSMessage$Priority" : "para>5 - Low priority, the notification might be delayed, delivered as part of a group, or throttled.
/listitem>10 - High priority, the notification is sent immediately. This is the default value. A high priority notification should trigger an alert, play a sound, or badge your app's icon on the recipient's device.
Amazon Pinpoint specifies this value in the apns-priority request header when it sends the notification message to APNs.
The equivalent values for Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), are normal, for 5, and high, for 10. If you specify an FCM value for this property, Amazon Pinpoint accepts and converts the value to the corresponding APNs value.
", - "APNSMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. This value overrides all other content for the message.
If you specify the raw content of an APNs push notification, the message payload has to include the content-available key. The value of the content-available key has to be an integer, and can only be 0 or 1. If you're sending a standard notification, set the value of content-available to 0. If you're sending a silent (background) notification, set the value of content-available to 1. Additionally, silent notification payloads can't include the alert, badge, or sound keys. For more information, see Generating a Remote Notification and Pushing Background Updates to Your App on the Apple Developer website.
The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.
If you specify the raw content of an APNs push notification, the message payload has to include the content-available key. The value of the content-available key has to be an integer, and can only be 0 or 1. If you're sending a standard notification, set the value of content-available to 0. If you're sending a silent (background) notification, set the value of content-available to 1. Additionally, silent notification payloads can't include the alert, badge, or sound keys. For more information, see Generating a Remote Notification and Pushing Background Updates to Your App on the Apple Developer website.
The key for the sound to play when the recipient receives the push notification. The value for this key is the name of a sound file in your app's main bundle or the Library/Sounds folder in your app's data container. If the sound file can't be found or you specify default for the value, the system plays the default alert sound.
", "APNSMessage$ThreadId" : "The key that represents your app-specific identifier for grouping notifications. If you provide a Notification Content app extension, you can use this value to group your notifications together.
", "APNSMessage$Title" : "The title to display above the notification message on the recipient's device.
", "APNSMessage$Url" : "The URL to open in the recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.
", "APNSPushNotificationTemplate$Body" : "The message body to use in push notifications that are based on the message template.
", "APNSPushNotificationTemplate$MediaUrl" : "The URL of an image or video to display in push notifications that are based on the message template.
", + "APNSPushNotificationTemplate$RawContent" : "The raw, JSON-formatted string to use as the payload for push notifications that are based on the message template. If specified, this value overrides all other content for the message template.
", "APNSPushNotificationTemplate$Sound" : "The key for the sound to play when the recipient receives a push notification that's based on the message template. The value for this key is the name of a sound file in your app's main bundle or the Library/Sounds folder in your app's data container. If the sound file can't be found or you specify default for the value, the system plays the default alert sound.
", "APNSPushNotificationTemplate$Title" : "The title to use in push notifications that are based on the message template. This title appears above the notification message on a recipient's device.
", "APNSPushNotificationTemplate$Url" : "The URL to open in the recipient's default mobile browser, if a recipient taps a push notification that's based on the message template and the value of the Action property is URL.
", @@ -1713,11 +1730,12 @@ "ActivityResponse$State" : "The current status of the activity. Possible values are: PENDING, INITIALIZING, RUNNING, PAUSED, CANCELLED, and COMPLETED.
", "ActivityResponse$TreatmentId" : "The unique identifier for the campaign treatment that the activity applies to. A treatment is a variation of a campaign that's used for A/B testing of a campaign.
", "AddressConfiguration$BodyOverride" : "The message body to use instead of the default message body. This value overrides the default message body.
", - "AddressConfiguration$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.
", + "AddressConfiguration$RawContent" : "The raw, JSON-formatted string to use as the payload for the message. If specified, this value overrides all other values for the message.
", "AddressConfiguration$TitleOverride" : "The message title to use instead of the default message title. This value overrides the default message title.
", "AndroidPushNotificationTemplate$Body" : "The message body to use in a push notification that's based on the message template.
", "AndroidPushNotificationTemplate$ImageIconUrl" : "The URL of the large icon image to display in the content view of a push notification that's based on the message template.
", "AndroidPushNotificationTemplate$ImageUrl" : "The URL of an image to display in a push notification that's based on the message template.
", + "AndroidPushNotificationTemplate$RawContent" : "The raw, JSON-formatted string to use as the payload for a push notification that's based on the message template. If specified, this value overrides all other content for the message template.
", "AndroidPushNotificationTemplate$SmallImageIconUrl" : "The URL of the small icon image to display in the status bar and the content view of a push notification that's based on the message template.
", "AndroidPushNotificationTemplate$Sound" : "The sound to play when a recipient receives a push notification that's based on the message template. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.
", "AndroidPushNotificationTemplate$Title" : "The title to use in a push notification that's based on the message template. This title appears above the notification message on a recipient's device.
", @@ -1748,7 +1766,7 @@ "BaiduMessage$IconReference" : "The icon image name of the asset saved in your app.
", "BaiduMessage$ImageIconUrl" : "The URL of the large icon image to display in the content view of the push notification.
", "BaiduMessage$ImageUrl" : "The URL of an image to display in the push notification.
", - "BaiduMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.
", + "BaiduMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.
", "BaiduMessage$SmallImageIconUrl" : "The URL of the small icon image to display in the status bar and the content view of the push notification.
", "BaiduMessage$Sound" : "The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.
", "BaiduMessage$Title" : "The title to display above the notification message on the recipient's device.
", @@ -1759,9 +1777,9 @@ "CampaignDateRangeKpiResponse$KpiName" : "The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.
", "CampaignDateRangeKpiResponse$NextToken" : "The string to use in a subsequent request to get the next page of results in a paginated response. This value is null for the Campaign Metrics resource because the resource returns all results in a single page.
", "CampaignDateRangeKpiResponse$StartTime" : "The first date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.
", - "CampaignEmailMessage$Body" : "The body of the email for recipients whose email clients don't support HTML content.
", + "CampaignEmailMessage$Body" : "The body of the email for recipients whose email clients don't render HTML content.
", "CampaignEmailMessage$FromAddress" : "The verified email address to send the email from. The default address is the FromAddress specified for the email channel for the application.
", - "CampaignEmailMessage$HtmlBody" : "The body of the email, in HTML format, for recipients whose email clients support HTML content.
", + "CampaignEmailMessage$HtmlBody" : "The body of the email, in HTML format, for recipients whose email clients render HTML content.
", "CampaignEmailMessage$Title" : "The subject line, or title, of the email.
", "CampaignHook$LambdaFunctionName" : "The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to send messages for a campaign.
", "CampaignHook$WebUrl" : "The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function over HTTPS.
", @@ -1789,7 +1807,7 @@ "CreateTemplateMessageBody$Arn" : "The Amazon Resource Name (ARN) of the message template that was created.
", "CreateTemplateMessageBody$Message" : "The message that's returned from the API for the request to create the message template.
", "CreateTemplateMessageBody$RequestID" : "The unique identifier for the request to create the message template.
", - "DefaultMessage$Body" : "The default message body of the push notification, email, or SMS message.
", + "DefaultMessage$Body" : "The default body of the message.
", "DefaultPushNotificationMessage$Body" : "The default body of the notification message.
", "DefaultPushNotificationMessage$Title" : "The default title to display above the notification message on a recipient's device.
", "DefaultPushNotificationMessage$Url" : "The default URL to open in a recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.
", @@ -1816,16 +1834,20 @@ "EmailMessage$FromAddress" : "The verified email address to send the email message from. The default value is the FromAddress specified for the email channel.
", "EmailMessageActivity$NextActivity" : "The unique identifier for the next activity to perform, after the message is sent.
", "EmailMessageActivity$TemplateName" : "The name of the email template to use for the message.
", - "EmailTemplateRequest$HtmlPart" : "The message body, in HTML format, to use in email messages that are based on the message template. We recommend using HTML format for email clients that support HTML. You can include links, formatted text, and more in an HTML message.
", + "EmailTemplateRequest$DefaultSubstitutions" : "A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.
", + "EmailTemplateRequest$HtmlPart" : "The message body, in HTML format, to use in email messages that are based on the message template. We recommend using HTML format for email clients that render HTML content. You can include links, formatted text, and more in an HTML message.
", "EmailTemplateRequest$Subject" : "The subject line, or title, to use in email messages that are based on the message template.
", - "EmailTemplateRequest$TextPart" : "The message body, in text format, to use in email messages that are based on the message template. We recommend using text format for email clients that don't support HTML and clients that are connected to high-latency networks, such as mobile devices.
", + "EmailTemplateRequest$TemplateDescription" : "A custom description of the message template.
", + "EmailTemplateRequest$TextPart" : "The message body, in plain text format, to use in email messages that are based on the message template. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices.
", "EmailTemplateResponse$Arn" : "The Amazon Resource Name (ARN) of the message template.
", "EmailTemplateResponse$CreationDate" : "The date when the message template was created.
", + "EmailTemplateResponse$DefaultSubstitutions" : "The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.
", "EmailTemplateResponse$HtmlPart" : "The message body, in HTML format, that's used in email messages that are based on the message template.
", "EmailTemplateResponse$LastModifiedDate" : "The date when the message template was last modified.
", "EmailTemplateResponse$Subject" : "The subject line, or title, that's used in email messages that are based on the message template.
", + "EmailTemplateResponse$TemplateDescription" : "The custom description of the message template.
", "EmailTemplateResponse$TemplateName" : "The name of the message template.
", - "EmailTemplateResponse$TextPart" : "The message body, in text format, that's used in email messages that are based on the message template.
", + "EmailTemplateResponse$TextPart" : "The message body, in plain text format, that's used in email messages that are based on the message template.
", "EndpointBatchItem$Address" : "The destination address for messages or push notifications that you send to the endpoint. The address varies by channel. For a push-notification channel, use the token provided by the push notification service, such as an Apple Push Notification service (APNs) device token or a Firebase Cloud Messaging (FCM) registration token. For the SMS channel, use a phone number in E.164 format, such as +12065550100. For the email channel, use an email address.
", "EndpointBatchItem$EffectiveDate" : "The date and time, in ISO 8601 format, when the endpoint was created or updated.
", "EndpointBatchItem$EndpointStatus" : "Specifies whether to send messages or push notifications to the endpoint. Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, messages aren’t sent to the endpoint.
Amazon Pinpoint automatically sets this value to ACTIVE when you create an endpoint or update an existing endpoint. Amazon Pinpoint automatically sets this value to INACTIVE if you update another endpoint that has the same address specified by the Address property.
", @@ -1864,7 +1886,7 @@ "EndpointResponse$OptOut" : "Specifies whether the user who's associated with the endpoint has opted out of receiving messages and push notifications from you. Possible values are: ALL, the user has opted out and doesn't want to receive any messages or push notifications; and, NONE, the user hasn't opted out and wants to receive all messages and push notifications.
", "EndpointResponse$RequestId" : "The unique identifier for the most recent request to update the endpoint.
", "EndpointSendConfiguration$BodyOverride" : "The body of the message. If specified, this value overrides the default message body.
", - "EndpointSendConfiguration$RawContent" : "The raw, JSON-formatted string to use as the payload for the message. If specified, this value overrides the message.
", + "EndpointSendConfiguration$RawContent" : "The raw, JSON-formatted string to use as the payload for the message. If specified, this value overrides all other values for the message.
", "EndpointSendConfiguration$TitleOverride" : "The title or subject line of the message. If specified, this value overrides the default message title or subject line.
", "EndpointUser$UserId" : "The unique identifier for the user.
", "Event$AppPackageName" : "The package name of the app that's recording the event.
", @@ -1908,7 +1930,7 @@ "GCMMessage$ImageIconUrl" : "The URL of the large icon image to display in the content view of the push notification.
", "GCMMessage$ImageUrl" : "The URL of an image to display in the push notification.
", "GCMMessage$Priority" : "para>normal - The notification might be delayed. Delivery is optimized for battery usage on the recipient's device. Use this value unless immediate delivery is required.
/listitem>high - The notification is sent immediately and might wake a sleeping device.
Amazon Pinpoint specifies this value in the FCM priority parameter when it sends the notification message to FCM.
The equivalent values for Apple Push Notification service (APNs) are 5, for normal, and 10, for high. If you specify an APNs value for this property, Amazon Pinpoint accepts and converts the value to the corresponding FCM value.
", - "GCMMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.
", + "GCMMessage$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.
", "GCMMessage$RestrictedPackageName" : "The package name of the application where registration tokens must match in order for the recipient to receive the message.
", "GCMMessage$SmallImageIconUrl" : "The URL of the small icon image to display in the status bar and the content view of the push notification.
", "GCMMessage$Sound" : "The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.
", @@ -1963,7 +1985,7 @@ "Message$ImageUrl" : "The URL of an image to display in the push notification.
", "Message$JsonBody" : "The JSON payload to use for a silent push notification.
", "Message$MediaUrl" : "The URL of the image or video to display in the push notification.
", - "Message$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. This value overrides other values for the message.
", + "Message$RawContent" : "The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.
", "Message$Title" : "The title to display above the notification message on a recipient's device.
", "Message$Url" : "The URL to open in a recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.
", "MessageBody$Message" : "The message that's returned from the API.
", @@ -1997,9 +2019,13 @@ "PublicEndpoint$EndpointStatus" : "Specifies whether to send messages or push notifications to the endpoint. Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, messages aren’t sent to the endpoint.
Amazon Pinpoint automatically sets this value to ACTIVE when you create an endpoint or update an existing endpoint. Amazon Pinpoint automatically sets this value to INACTIVE if you update another endpoint that has the same address specified by the Address property.
", "PublicEndpoint$OptOut" : "Specifies whether the user who's associated with the endpoint has opted out of receiving messages and push notifications from you. Possible values are: ALL, the user has opted out and doesn't want to receive any messages or push notifications; and, NONE, the user hasn't opted out and wants to receive all messages and push notifications.
", "PublicEndpoint$RequestId" : "A unique identifier that's generated each time the endpoint is updated.
", + "PushNotificationTemplateRequest$DefaultSubstitutions" : "A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.
", + "PushNotificationTemplateRequest$TemplateDescription" : "A custom description of the message template.
", "PushNotificationTemplateResponse$Arn" : "The Amazon Resource Name (ARN) of the message template.
", "PushNotificationTemplateResponse$CreationDate" : "The date when the message template was created.
", + "PushNotificationTemplateResponse$DefaultSubstitutions" : "The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.
", "PushNotificationTemplateResponse$LastModifiedDate" : "The date when the message template was last modified.
", + "PushNotificationTemplateResponse$TemplateDescription" : "The custom description of the message template.
", "PushNotificationTemplateResponse$TemplateName" : "The name of the message template.
", "QuietTime$End" : "The specific time when quiet time ends. This value has to use 24-hour notation and be in HH:MM format, where HH is the hour (with a leading zero, if applicable) and MM is the minutes. For example, use 02:30 to represent 2:30 AM, or 14:30 to represent 2:30 PM.
", "QuietTime$Start" : "The specific time when quiet time begins. This value has to use 24-hour notation and be in HH:MM format, where HH is the hour (with a leading zero, if applicable) and MM is the minutes. For example, use 02:30 to represent 2:30 AM, or 14:30 to represent 2:30 PM.
", @@ -2023,10 +2049,14 @@ "SMSMessage$OriginationNumber" : "The number to send the SMS message from. This value should be one of the dedicated long or short codes that's assigned to your AWS account. If you don't specify a long or short code, Amazon Pinpoint assigns a random long code to the SMS message and sends the message from that code.
", "SMSMessage$SenderId" : "The sender ID to display as the sender of the message on a recipient's device. Support for sender IDs varies by country or region.
", "SMSTemplateRequest$Body" : "The message body to use in text messages that are based on the message template.
", + "SMSTemplateRequest$DefaultSubstitutions" : "A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.
", + "SMSTemplateRequest$TemplateDescription" : "A custom description of the message template.
", "SMSTemplateResponse$Arn" : "The Amazon Resource Name (ARN) of the message template.
", "SMSTemplateResponse$Body" : "The message body that's used in text messages that are based on the message template.
", "SMSTemplateResponse$CreationDate" : "The date when the message template was created.
", + "SMSTemplateResponse$DefaultSubstitutions" : "The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.
", "SMSTemplateResponse$LastModifiedDate" : "The date when the message template was last modified.
", + "SMSTemplateResponse$TemplateDescription" : "The custom description of the message template.
", "SMSTemplateResponse$TemplateName" : "The name of the message template.
", "Schedule$EndTime" : "The scheduled time, in ISO 8601 format, when the campaign ended or will end.
", "Schedule$StartTime" : "The scheduled time, in ISO 8601 format, when the campaign began or will begin.
", @@ -2055,7 +2085,9 @@ "Template$Name" : "The name of the message template to use for the message. If specified, this value must match the name of an existing message template.
", "TemplateResponse$Arn" : "The Amazon Resource Name (ARN) of the message template.
", "TemplateResponse$CreationDate" : "The date when the message template was created.
", + "TemplateResponse$DefaultSubstitutions" : "The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.
", "TemplateResponse$LastModifiedDate" : "The date when the message template was last modified.
", + "TemplateResponse$TemplateDescription" : "The custom description of the message template.
", "TemplateResponse$TemplateName" : "The name of the message template.
", "TemplatesResponse$NextToken" : "The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
", "TreatmentResource$Id" : "The unique identifier for the treatment.
", @@ -2067,17 +2099,31 @@ "VoiceChannelResponse$LastModifiedBy" : "The user who last modified the voice channel.
", "VoiceChannelResponse$LastModifiedDate" : "The date and time, in ISO 8601 format, when the voice channel was last modified.
", "VoiceChannelResponse$Platform" : "The type of messaging or notification platform for the channel. For the voice channel, this value is VOICE.
", - "VoiceMessage$Body" : "The text script for the voice message.
", - "VoiceMessage$LanguageCode" : "The language to use when delivering the message. For a list of supported languages, see the Amazon Polly Developer Guide.
", + "VoiceMessage$Body" : "The text of the script to use for the voice message.
", + "VoiceMessage$LanguageCode" : "The code for the language to use when synthesizing the text of the message script. For a list of supported languages and the code for each one, see the Amazon Polly Developer Guide.
", "VoiceMessage$OriginationNumber" : "The long code to send the voice message from. This value should be one of the dedicated long codes that's assigned to your AWS account. Although it isn't required, we recommend that you specify the long code in E.164 format, for example +12065550100, to ensure prompt and accurate delivery of the message.
", - "VoiceMessage$VoiceId" : "The name of the voice to use when delivering the message. For a list of supported voices, see the Amazon Polly Developer Guide.
", + "VoiceMessage$VoiceId" : "The name of the voice to use when delivering the message. For a list of supported voices, see the Amazon Polly Developer Guide.
", + "VoiceTemplateRequest$Body" : "The text of the script to use in messages that are based on the message template, in plain text format.
", + "VoiceTemplateRequest$DefaultSubstitutions" : "A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.
", + "VoiceTemplateRequest$LanguageCode" : "The code for the language to use when synthesizing the text of the script in messages that are based on the message template. For a list of supported languages and the code for each one, see the Amazon Polly Developer Guide.
", + "VoiceTemplateRequest$TemplateDescription" : "A custom description of the message template.
", + "VoiceTemplateRequest$VoiceId" : "The name of the voice to use when delivering messages that are based on the message template. For a list of supported voices, see the Amazon Polly Developer Guide.
", + "VoiceTemplateResponse$Arn" : "The Amazon Resource Name (ARN) of the message template.
", + "VoiceTemplateResponse$Body" : "The text of the script that's used in messages that are based on the message template, in plain text format.
", + "VoiceTemplateResponse$CreationDate" : "The date when the message template was created.
", + "VoiceTemplateResponse$DefaultSubstitutions" : "The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.
", + "VoiceTemplateResponse$LanguageCode" : "The code for the language that's used when synthesizing the text of the script in messages that are based on the message template. For a list of supported languages and the code for each one, see the Amazon Polly Developer Guide.
", + "VoiceTemplateResponse$LastModifiedDate" : "The date when the message template was last modified.
", + "VoiceTemplateResponse$TemplateDescription" : "The custom description of the message template.
", + "VoiceTemplateResponse$TemplateName" : "The name of the message template.
", + "VoiceTemplateResponse$VoiceId" : "The name of the voice that's used when delivering messages that are based on the message template. For a list of supported voices, see the Amazon Polly Developer Guide.
", "WaitActivity$NextActivity" : "The unique identifier for the next activity to perform, after performing the wait activity.
", - "WaitTime$WaitFor" : "The amount of time, as a duration in ISO 8601 format, to wait before determining whether the activity's conditions have been met or moving participants to the next activity in the journey.
", + "WaitTime$WaitFor" : "The amount of time to wait, as a duration in ISO 8601 format, before determining whether the activity's conditions have been met or moving participants to the next activity in the journey.
", "WaitTime$WaitUntil" : "The date and time, in ISO 8601 format, when Amazon Pinpoint determines whether the activity's conditions have been met or the activity moves participants to the next activity in the journey.
", - "WriteCampaignRequest$Description" : "The custom description of the campaign.
", + "WriteCampaignRequest$Description" : "A custom description of the campaign.
", "WriteCampaignRequest$Name" : "The custom name of the campaign.
", "WriteCampaignRequest$SegmentId" : "The unique identifier for the segment to associate with the campaign.
", - "WriteCampaignRequest$TreatmentDescription" : "The custom description of a variation of the campaign to use for A/B testing.
", + "WriteCampaignRequest$TreatmentDescription" : "A custom description of a variation of the campaign to use for A/B testing.
", "WriteCampaignRequest$TreatmentName" : "The custom name of a variation of the campaign to use for A/B testing.
", "WriteEventStream$DestinationStreamArn" : "The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.
For a Kinesis data stream, the ARN format is: arn:aws:kinesis:
For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:
The AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to publish event data to the stream in your AWS account.
", @@ -2087,7 +2133,7 @@ "WriteJourneyRequest$RefreshFrequency" : "The frequency with which Amazon Pinpoint evaluates segment and event data for the journey, as a duration in ISO 8601 format.
", "WriteJourneyRequest$StartActivity" : "The unique identifier for the first activity in the journey.
", "WriteSegmentRequest$Name" : "The name of the segment.
", - "WriteTreatmentResource$TreatmentDescription" : "The custom description of the treatment.
", + "WriteTreatmentResource$TreatmentDescription" : "A custom description of the treatment.
", "WriteTreatmentResource$TreatmentName" : "The custom name of the treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.
", "ListOf__string$member" : null, "MapOf__string$member" : null diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index 054b5e433cf..925b05d0c69 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -11,6 +11,80 @@ "uid":"quicksight-2018-04-01" }, "operations":{ + "CancelIngestion":{ + "name":"CancelIngestion", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}" + }, + "input":{"shape":"CancelIngestionRequest"}, + "output":{"shape":"CancelIngestionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceExistsException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateDashboard":{ + "name":"CreateDashboard", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" + }, + "input":{"shape":"CreateDashboardRequest"}, + "output":{"shape":"CreateDashboardResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ConflictException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateDataSet":{ + "name":"CreateDataSet", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sets" + }, + "input":{"shape":"CreateDataSetRequest"}, + "output":{"shape":"CreateDataSetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + 
{"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateDataSource":{ + "name":"CreateDataSource", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sources" + }, + "input":{"shape":"CreateDataSourceRequest"}, + "output":{"shape":"CreateDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, "CreateGroup":{ "name":"CreateGroup", "http":{ @@ -49,6 +123,128 @@ {"shape":"ResourceUnavailableException"} ] }, + "CreateIAMPolicyAssignment":{ + "name":"CreateIAMPolicyAssignment", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/" + }, + "input":{"shape":"CreateIAMPolicyAssignmentRequest"}, + "output":{"shape":"CreateIAMPolicyAssignmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateIngestion":{ + "name":"CreateIngestion", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}" + }, + "input":{"shape":"CreateIngestionRequest"}, + "output":{"shape":"CreateIngestionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateTemplate":{ + "name":"CreateTemplate", + 
"http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"CreateTemplateRequest"}, + "output":{"shape":"CreateTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateTemplateAlias":{ + "name":"CreateTemplateAlias", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"CreateTemplateAliasRequest"}, + "output":{"shape":"CreateTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteDashboard":{ + "name":"DeleteDashboard", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" + }, + "input":{"shape":"DeleteDashboardRequest"}, + "output":{"shape":"DeleteDashboardResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteDataSet":{ + "name":"DeleteDataSet", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}" + }, + "input":{"shape":"DeleteDataSetRequest"}, + "output":{"shape":"DeleteDataSetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + 
{"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteDataSource":{ + "name":"DeleteDataSource", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}" + }, + "input":{"shape":"DeleteDataSourceRequest"}, + "output":{"shape":"DeleteDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteGroup":{ "name":"DeleteGroup", "http":{ @@ -85,6 +281,57 @@ {"shape":"ResourceUnavailableException"} ] }, + "DeleteIAMPolicyAssignment":{ + "name":"DeleteIAMPolicyAssignment", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/namespace/{Namespace}/iam-policy-assignments/{AssignmentName}" + }, + "input":{"shape":"DeleteIAMPolicyAssignmentRequest"}, + "output":{"shape":"DeleteIAMPolicyAssignmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteTemplate":{ + "name":"DeleteTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"DeleteTemplateRequest"}, + "output":{"shape":"DeleteTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteTemplateAlias":{ + "name":"DeleteTemplateAlias", + "http":{ + "method":"DELETE", + 
"requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"DeleteTemplateAliasRequest"}, + "output":{"shape":"DeleteTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteUser":{ "name":"DeleteUser", "http":{ @@ -119,184 +366,214 @@ {"shape":"ResourceUnavailableException"} ] }, - "DescribeGroup":{ - "name":"DescribeGroup", + "DescribeDashboard":{ + "name":"DescribeDashboard", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" }, - "input":{"shape":"DescribeGroupRequest"}, - "output":{"shape":"DescribeGroupResponse"}, + "input":{"shape":"DescribeDashboardRequest"}, + "output":{"shape":"DescribeDashboardResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} ] }, - "DescribeUser":{ - "name":"DescribeUser", + "DescribeDashboardPermissions":{ + "name":"DescribeDashboardPermissions", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}" + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions" }, - "input":{"shape":"DescribeUserRequest"}, - "output":{"shape":"DescribeUserResponse"}, + "input":{"shape":"DescribeDashboardPermissionsRequest"}, + "output":{"shape":"DescribeDashboardPermissionsResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, 
{"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} ] }, - "GetDashboardEmbedUrl":{ - "name":"GetDashboardEmbedUrl", + "DescribeDataSet":{ + "name":"DescribeDataSet", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url" + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}" }, - "input":{"shape":"GetDashboardEmbedUrlRequest"}, - "output":{"shape":"GetDashboardEmbedUrlResponse"}, + "input":{"shape":"DescribeDataSetRequest"}, + "output":{"shape":"DescribeDataSetResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceExistsException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"DomainNotWhitelistedException"}, - {"shape":"QuickSightUserNotFoundException"}, - {"shape":"IdentityTypeNotSupportedException"}, - {"shape":"SessionLifetimeInMinutesInvalidException"}, - {"shape":"UnsupportedUserEditionException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListGroupMemberships":{ - "name":"ListGroupMemberships", + "DescribeDataSetPermissions":{ + "name":"DescribeDataSetPermissions", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members" + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions" }, - "input":{"shape":"ListGroupMembershipsRequest"}, - "output":{"shape":"ListGroupMembershipsResponse"}, + "input":{"shape":"DescribeDataSetPermissionsRequest"}, + "output":{"shape":"DescribeDataSetPermissionsResponse"}, "errors":[ {"shape":"AccessDeniedException"}, 
{"shape":"InvalidParameterValueException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidNextTokenException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListGroups":{ - "name":"ListGroups", + "DescribeDataSource":{ + "name":"DescribeDataSource", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups" + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}" }, - "input":{"shape":"ListGroupsRequest"}, - "output":{"shape":"ListGroupsResponse"}, + "input":{"shape":"DescribeDataSourceRequest"}, + "output":{"shape":"DescribeDataSourceResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidNextTokenException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListUserGroups":{ - "name":"ListUserGroups", + "DescribeDataSourcePermissions":{ + "name":"DescribeDataSourcePermissions", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/groups" + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions" }, - "input":{"shape":"ListUserGroupsRequest"}, - "output":{"shape":"ListUserGroupsResponse"}, + "input":{"shape":"DescribeDataSourcePermissionsRequest"}, + "output":{"shape":"DescribeDataSourcePermissionsResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalFailureException"}, - 
{"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListUsers":{ - "name":"ListUsers", + "DescribeGroup":{ + "name":"DescribeGroup", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" }, - "input":{"shape":"ListUsersRequest"}, - "output":{"shape":"ListUsersResponse"}, + "input":{"shape":"DescribeGroupRequest"}, + "output":{"shape":"DescribeGroupResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidNextTokenException"}, + {"shape":"PreconditionNotMetException"}, {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ] }, - "RegisterUser":{ - "name":"RegisterUser", + "DescribeIAMPolicyAssignment":{ + "name":"DescribeIAMPolicyAssignment", "http":{ - "method":"POST", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}" }, - "input":{"shape":"RegisterUserRequest"}, - "output":{"shape":"RegisterUserResponse"}, + "input":{"shape":"DescribeIAMPolicyAssignmentRequest"}, + "output":{"shape":"DescribeIAMPolicyAssignmentResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"LimitExceededException"}, - {"shape":"ResourceExistsException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} ] }, - "UpdateGroup":{ - "name":"UpdateGroup", + "DescribeIngestion":{ + "name":"DescribeIngestion", "http":{ 
- "method":"PUT", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}" }, - "input":{"shape":"UpdateGroupRequest"}, - "output":{"shape":"UpdateGroupResponse"}, + "input":{"shape":"DescribeIngestionRequest"}, + "output":{"shape":"DescribeIngestionResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceExistsException"}, + {"shape":"InternalFailureException"} ] }, - "UpdateUser":{ - "name":"UpdateUser", + "DescribeTemplate":{ + "name":"DescribeTemplate", "http":{ - "method":"PUT", + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"DescribeTemplateRequest"}, + "output":{"shape":"DescribeTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DescribeTemplateAlias":{ + "name":"DescribeTemplateAlias", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"DescribeTemplateAliasRequest"}, + "output":{"shape":"DescribeTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DescribeTemplatePermissions":{ + "name":"DescribeTemplatePermissions", + "http":{ + "method":"GET", + 
"requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/permissions" + }, + "input":{"shape":"DescribeTemplatePermissionsRequest"}, + "output":{"shape":"DescribeTemplatePermissionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DescribeUser":{ + "name":"DescribeUser", + "http":{ + "method":"GET", "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}" }, - "input":{"shape":"UpdateUserRequest"}, - "output":{"shape":"UpdateUserResponse"}, + "input":{"shape":"DescribeUserRequest"}, + "output":{"shape":"DescribeUserResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, @@ -305,133 +582,4097 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ] - } - }, - "shapes":{ - "AccessDeniedException":{ + }, + "GetDashboardEmbedUrl":{ + "name":"GetDashboardEmbedUrl", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url" + }, + "input":{"shape":"GetDashboardEmbedUrlRequest"}, + "output":{"shape":"GetDashboardEmbedUrlResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"DomainNotWhitelistedException"}, + {"shape":"QuickSightUserNotFoundException"}, + {"shape":"IdentityTypeNotSupportedException"}, + {"shape":"SessionLifetimeInMinutesInvalidException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDashboardVersions":{ + "name":"ListDashboardVersions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions" + }, + 
"input":{"shape":"ListDashboardVersionsRequest"}, + "output":{"shape":"ListDashboardVersionsResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDashboards":{ + "name":"ListDashboards", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards" + }, + "input":{"shape":"ListDashboardsRequest"}, + "output":{"shape":"ListDashboardsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDataSets":{ + "name":"ListDataSets", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets" + }, + "input":{"shape":"ListDataSetsRequest"}, + "output":{"shape":"ListDataSetsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDataSources":{ + "name":"ListDataSources", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sources" + }, + "input":{"shape":"ListDataSourcesRequest"}, + "output":{"shape":"ListDataSourcesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListGroupMemberships":{ + "name":"ListGroupMemberships", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members" + }, + "input":{"shape":"ListGroupMembershipsRequest"}, + "output":{"shape":"ListGroupMembershipsResponse"}, + "errors":[ + 
{"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "ListGroups":{ + "name":"ListGroups", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups" + }, + "input":{"shape":"ListGroupsRequest"}, + "output":{"shape":"ListGroupsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "ListIAMPolicyAssignments":{ + "name":"ListIAMPolicyAssignments", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments" + }, + "input":{"shape":"ListIAMPolicyAssignmentsRequest"}, + "output":{"shape":"ListIAMPolicyAssignmentsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListIAMPolicyAssignmentsForUser":{ + "name":"ListIAMPolicyAssignmentsForUser", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/iam-policy-assignments" + }, + "input":{"shape":"ListIAMPolicyAssignmentsForUserRequest"}, + "output":{"shape":"ListIAMPolicyAssignmentsForUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + 
{"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListIngestions":{ + "name":"ListIngestions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions" + }, + "input":{"shape":"ListIngestionsRequest"}, + "output":{"shape":"ListIngestionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceExistsException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/resources/{ResourceArn}/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTemplateAliases":{ + "name":"ListTemplateAliases", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases" + }, + "input":{"shape":"ListTemplateAliasesRequest"}, + "output":{"shape":"ListTemplateAliasesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTemplateVersions":{ + "name":"ListTemplateVersions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/versions" + }, + "input":{"shape":"ListTemplateVersionsRequest"}, + "output":{"shape":"ListTemplateVersionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + 
{"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTemplates":{ + "name":"ListTemplates", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates" + }, + "input":{"shape":"ListTemplatesRequest"}, + "output":{"shape":"ListTemplatesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListUserGroups":{ + "name":"ListUserGroups", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/groups" + }, + "input":{"shape":"ListUserGroupsRequest"}, + "output":{"shape":"ListUserGroupsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "RegisterUser":{ + "name":"RegisterUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + }, + "input":{"shape":"RegisterUserRequest"}, + "output":{"shape":"RegisterUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + 
{"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/resources/{ResourceArn}/tags" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/resources/{ResourceArn}/tags" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDashboard":{ + "name":"UpdateDashboard", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" + }, + "input":{"shape":"UpdateDashboardRequest"}, + "output":{"shape":"UpdateDashboardResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDashboardPermissions":{ + "name":"UpdateDashboardPermissions", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions" + }, + 
"input":{"shape":"UpdateDashboardPermissionsRequest"}, + "output":{"shape":"UpdateDashboardPermissionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDashboardPublishedVersion":{ + "name":"UpdateDashboardPublishedVersion", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions/{VersionNumber}" + }, + "input":{"shape":"UpdateDashboardPublishedVersionRequest"}, + "output":{"shape":"UpdateDashboardPublishedVersionResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSet":{ + "name":"UpdateDataSet", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}" + }, + "input":{"shape":"UpdateDataSetRequest"}, + "output":{"shape":"UpdateDataSetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSetPermissions":{ + "name":"UpdateDataSetPermissions", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions" + }, + "input":{"shape":"UpdateDataSetPermissionsRequest"}, + "output":{"shape":"UpdateDataSetPermissionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + 
{"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSource":{ + "name":"UpdateDataSource", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}" + }, + "input":{"shape":"UpdateDataSourceRequest"}, + "output":{"shape":"UpdateDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSourcePermissions":{ + "name":"UpdateDataSourcePermissions", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions" + }, + "input":{"shape":"UpdateDataSourcePermissionsRequest"}, + "output":{"shape":"UpdateDataSourcePermissionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateGroup":{ + "name":"UpdateGroup", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" + }, + "input":{"shape":"UpdateGroupRequest"}, + "output":{"shape":"UpdateGroupResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "UpdateIAMPolicyAssignment":{ + "name":"UpdateIAMPolicyAssignment", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}" + }, + 
"input":{"shape":"UpdateIAMPolicyAssignmentRequest"}, + "output":{"shape":"UpdateIAMPolicyAssignmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateTemplate":{ + "name":"UpdateTemplate", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"UpdateTemplateRequest"}, + "output":{"shape":"UpdateTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateTemplateAlias":{ + "name":"UpdateTemplateAlias", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"UpdateTemplateAliasRequest"}, + "output":{"shape":"UpdateTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateTemplatePermissions":{ + "name":"UpdateTemplatePermissions", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/permissions" + }, + "input":{"shape":"UpdateTemplatePermissionsRequest"}, + "output":{"shape":"UpdateTemplatePermissionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, 
+ {"shape":"InternalFailureException"} + ] + }, + "UpdateUser":{ + "name":"UpdateUser", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}" + }, + "input":{"shape":"UpdateUserRequest"}, + "output":{"shape":"UpdateUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "ActionList":{ + "type":"list", + "member":{"shape":"String"}, + "max":16, + "min":1 + }, + "ActiveIAMPolicyAssignment":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "PolicyArn":{"shape":"Arn"} + } + }, + "ActiveIAMPolicyAssignmentList":{ + "type":"list", + "member":{"shape":"ActiveIAMPolicyAssignment"} + }, + "AdHocFilteringOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{"shape":"DashboardBehavior"} + } + }, + "AliasName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\w\\-]+|(\\$LATEST)|(\\$PUBLISHED)" + }, + "AmazonElasticsearchParameters":{ + "type":"structure", + "required":["Domain"], + "members":{ + "Domain":{"shape":"Domain"} + } + }, + "Arn":{"type":"string"}, + "AssignmentStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DRAFT", + "DISABLED" + ] + }, + "AthenaParameters":{ + "type":"structure", + "members":{ + "WorkGroup":{"shape":"WorkGroup"} + } + }, + "AuroraParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "AuroraPostgreSqlParameters":{ + "type":"structure", + 
"required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "AwsAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]{12}$" + }, + "AwsIotAnalyticsParameters":{ + "type":"structure", + "required":["DataSetName"], + "members":{ + "DataSetName":{"shape":"DataSetName"} + } + }, + "Boolean":{"type":"boolean"}, + "CalculatedColumn":{ + "type":"structure", + "required":[ + "ColumnName", + "ColumnId", + "Expression" + ], + "members":{ + "ColumnName":{"shape":"ColumnName"}, + "ColumnId":{"shape":"ColumnId"}, + "Expression":{"shape":"Expression"} + } + }, + "CalculatedColumnList":{ + "type":"list", + "member":{"shape":"CalculatedColumn"}, + "max":128, + "min":1 + }, + "CancelIngestionRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "IngestionId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "IngestionId":{ + "shape":"IngestionId", + "location":"uri", + "locationName":"IngestionId" + } + } + }, + "CancelIngestionResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "IngestionId":{"shape":"IngestionId"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CastColumnTypeOperation":{ + "type":"structure", + "required":[ + "ColumnName", + "NewColumnType" + ], + "members":{ + "ColumnName":{"shape":"ColumnName"}, + "NewColumnType":{"shape":"ColumnDataType"}, + "Format":{"shape":"TypeCastFormat"} + } + }, + "Catalog":{ + "type":"string", + "max":128 + }, + "ClusterId":{ + "type":"string", + "max":64, + "min":1 + }, + "ColumnDataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DECIMAL", + "DATETIME" + ] + }, + "ColumnGroup":{ + "type":"structure", + "members":{ + 
"GeoSpatialColumnGroup":{"shape":"GeoSpatialColumnGroup"} + } + }, + "ColumnGroupColumnSchema":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + } + }, + "ColumnGroupColumnSchemaList":{ + "type":"list", + "member":{"shape":"ColumnGroupColumnSchema"}, + "max":500 + }, + "ColumnGroupList":{ + "type":"list", + "member":{"shape":"ColumnGroup"}, + "max":8, + "min":1 + }, + "ColumnGroupName":{ + "type":"string", + "max":64, + "min":1 + }, + "ColumnGroupSchema":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ColumnGroupColumnSchemaList":{"shape":"ColumnGroupColumnSchemaList"} + } + }, + "ColumnGroupSchemaList":{ + "type":"list", + "member":{"shape":"ColumnGroupSchema"}, + "max":500 + }, + "ColumnId":{ + "type":"string", + "max":64, + "min":1 + }, + "ColumnList":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":16, + "min":1 + }, + "ColumnName":{ + "type":"string", + "max":128, + "min":1 + }, + "ColumnSchema":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "DataType":{"shape":"String"}, + "GeographicRole":{"shape":"String"} + } + }, + "ColumnSchemaList":{ + "type":"list", + "member":{"shape":"ColumnSchema"}, + "max":500 + }, + "ColumnTag":{ + "type":"structure", + "members":{ + "ColumnGeographicRole":{"shape":"GeoSpatialDataRole"} + } + }, + "ColumnTagList":{ + "type":"list", + "member":{"shape":"ColumnTag"}, + "max":16, + "min":1 + }, + "ConcurrentUpdatingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateColumnsOperation":{ + "type":"structure", + "required":["Columns"], + "members":{ + "Columns":{"shape":"CalculatedColumnList"} + } + }, + "CreateDashboardRequest":{ + 
"type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "Name", + "SourceEntity" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "Name":{"shape":"DashboardName"}, + "Parameters":{"shape":"Parameters"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "SourceEntity":{"shape":"DashboardSourceEntity"}, + "Tags":{"shape":"TagList"}, + "VersionDescription":{"shape":"VersionDescription"}, + "DashboardPublishOptions":{"shape":"DashboardPublishOptions"} + } + }, + "CreateDashboardResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "CreateDataSetRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "Name", + "PhysicalTableMap", + "ImportMode" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "PhysicalTableMap":{"shape":"PhysicalTableMap"}, + "LogicalTableMap":{"shape":"LogicalTableMap"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "ColumnGroups":{"shape":"ColumnGroupList"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDataSetResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "IngestionArn":{"shape":"Arn"}, + "IngestionId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + 
"CreateDataSourceRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId", + "Name", + "Type" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "Type":{"shape":"DataSourceType"}, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "Credentials":{"shape":"DataSourceCredentials"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDataSourceResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateGroupMembershipRequest":{ + "type":"structure", + "required":[ + "MemberName", + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "MemberName":{ + "shape":"GroupMemberName", + "location":"uri", + "locationName":"MemberName" + }, + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "CreateGroupMembershipResponse":{ + "type":"structure", + "members":{ + "GroupMember":{"shape":"GroupMember"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"GroupDescription"}, + "AwsAccountId":{ + "shape":"AwsAccountId", + 
"location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "CreateGroupResponse":{ + "type":"structure", + "members":{ + "Group":{"shape":"Group"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateIAMPolicyAssignmentRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssignmentName", + "AssignmentStatus", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "CreateIAMPolicyAssignmentResponse":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentId":{"shape":"String"}, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateIngestionRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "IngestionId", + "AwsAccountId" + ], + "members":{ + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "IngestionId":{ + "shape":"IngestionId", + "location":"uri", + "locationName":"IngestionId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + } + } + }, + "CreateIngestionResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "IngestionId":{"shape":"IngestionId"}, + "IngestionStatus":{"shape":"IngestionStatus"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + 
"location":"statusCode" + } + } + }, + "CreateTemplateAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName", + "TemplateVersionNumber" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + }, + "TemplateVersionNumber":{"shape":"VersionNumber"} + } + }, + "CreateTemplateAliasResponse":{ + "type":"structure", + "members":{ + "TemplateAlias":{"shape":"TemplateAlias"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "CreateTemplateRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "SourceEntity" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "Name":{"shape":"TemplateName"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "SourceEntity":{"shape":"TemplateSourceEntity"}, + "Tags":{"shape":"TagList"}, + "VersionDescription":{"shape":"VersionDescription"} + } + }, + "CreateTemplateResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "CredentialPair":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{"shape":"Username"}, + "Password":{"shape":"Password"} + } + }, + "CustomSql":{ + "type":"structure", + "required":[ + "DataSourceArn", + "Name", + "SqlQuery" + ], + "members":{ + 
"DataSourceArn":{"shape":"Arn"}, + "Name":{"shape":"CustomSqlName"}, + "SqlQuery":{"shape":"SqlQuery"}, + "Columns":{"shape":"InputColumnList"} + } + }, + "CustomSqlName":{ + "type":"string", + "max":64, + "min":1 + }, + "Dashboard":{ + "type":"structure", + "members":{ + "DashboardId":{"shape":"RestrictiveResourceId"}, + "Arn":{"shape":"Arn"}, + "Name":{"shape":"DashboardName"}, + "Version":{"shape":"DashboardVersion"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastPublishedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"} + } + }, + "DashboardBehavior":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "DashboardError":{ + "type":"structure", + "members":{ + "Type":{"shape":"DashboardErrorType"}, + "Message":{"shape":"NonEmptyString"} + } + }, + "DashboardErrorList":{ + "type":"list", + "member":{"shape":"DashboardError"}, + "min":1 + }, + "DashboardErrorType":{ + "type":"string", + "enum":[ + "DATA_SET_NOT_FOUND", + "INTERNAL_FAILURE", + "PARAMETER_VALUE_INCOMPATIBLE", + "PARAMETER_TYPE_INVALID", + "PARAMETER_NOT_FOUND", + "COLUMN_TYPE_MISMATCH", + "COLUMN_GEOGRAPHIC_ROLE_MISMATCH", + "COLUMN_REPLACEMENT_MISSING" + ] + }, + "DashboardName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "DashboardPublishOptions":{ + "type":"structure", + "members":{ + "AdHocFilteringOption":{"shape":"AdHocFilteringOption"}, + "ExportToCSVOption":{"shape":"ExportToCSVOption"}, + "SheetControlsOption":{"shape":"SheetControlsOption"} + } + }, + "DashboardSourceEntity":{ + "type":"structure", + "members":{ + "SourceTemplate":{"shape":"DashboardSourceTemplate"} + } + }, + "DashboardSourceTemplate":{ + "type":"structure", + "required":[ + "DataSetReferences", + "Arn" + ], + "members":{ + "DataSetReferences":{"shape":"DataSetReferenceList"}, + "Arn":{"shape":"Arn"} + } + }, + "DashboardSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + 
"Name":{"shape":"DashboardName"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "PublishedVersionNumber":{"shape":"VersionNumber"}, + "LastPublishedTime":{"shape":"Timestamp"} + } + }, + "DashboardSummaryList":{ + "type":"list", + "member":{"shape":"DashboardSummary"}, + "max":100 + }, + "DashboardUIState":{ + "type":"string", + "enum":[ + "EXPANDED", + "COLLAPSED" + ] + }, + "DashboardVersion":{ + "type":"structure", + "members":{ + "CreatedTime":{"shape":"Timestamp"}, + "Errors":{"shape":"DashboardErrorList"}, + "VersionNumber":{"shape":"VersionNumber"}, + "Status":{"shape":"ResourceStatus"}, + "Arn":{"shape":"Arn"}, + "SourceEntityArn":{"shape":"Arn"}, + "Description":{"shape":"VersionDescription"} + } + }, + "DashboardVersionSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "CreatedTime":{"shape":"Timestamp"}, + "VersionNumber":{"shape":"VersionNumber"}, + "Status":{"shape":"ResourceStatus"}, + "SourceEntityArn":{"shape":"Arn"}, + "Description":{"shape":"VersionDescription"} + } + }, + "DashboardVersionSummaryList":{ + "type":"list", + "member":{"shape":"DashboardVersionSummary"}, + "max":100 + }, + "DataSet":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "PhysicalTableMap":{"shape":"PhysicalTableMap"}, + "LogicalTableMap":{"shape":"LogicalTableMap"}, + "OutputColumns":{"shape":"OutputColumnList"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "ConsumedSpiceCapacityInBytes":{"shape":"Long"}, + "ColumnGroups":{"shape":"ColumnGroupList"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"} + } + }, + "DataSetConfiguration":{ + "type":"structure", + "members":{ + "Placeholder":{"shape":"String"}, + "DataSetSchema":{"shape":"DataSetSchema"}, + "ColumnGroupSchemaList":{"shape":"ColumnGroupSchemaList"} + } + }, + 
"DataSetConfigurationList":{ + "type":"list", + "member":{"shape":"DataSetConfiguration"}, + "max":30 + }, + "DataSetImportMode":{ + "type":"string", + "enum":[ + "SPICE", + "DIRECT_QUERY" + ] + }, + "DataSetName":{ + "type":"string", + "max":128, + "min":1 + }, + "DataSetReference":{ + "type":"structure", + "required":[ + "DataSetPlaceholder", + "DataSetArn" + ], + "members":{ + "DataSetPlaceholder":{"shape":"NonEmptyString"}, + "DataSetArn":{"shape":"Arn"} + } + }, + "DataSetReferenceList":{ + "type":"list", + "member":{"shape":"DataSetReference"}, + "min":1 + }, + "DataSetSchema":{ + "type":"structure", + "members":{ + "ColumnSchemaList":{"shape":"ColumnSchemaList"} + } + }, + "DataSetSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"} + } + }, + "DataSetSummaryList":{ + "type":"list", + "member":{"shape":"DataSetSummary"} + }, + "DataSource":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "Type":{"shape":"DataSourceType"}, + "Status":{"shape":"ResourceStatus"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"}, + "ErrorInfo":{"shape":"DataSourceErrorInfo"} + } + }, + "DataSourceCredentials":{ + "type":"structure", + "members":{ + "CredentialPair":{"shape":"CredentialPair"} + }, + "sensitive":true + }, + "DataSourceErrorInfo":{ + "type":"structure", + "members":{ + "Type":{"shape":"DataSourceErrorInfoType"}, + "Message":{"shape":"String"} + } + }, + "DataSourceErrorInfoType":{ + 
"type":"string", + "enum":[ + "TIMEOUT", + "ENGINE_VERSION_NOT_SUPPORTED", + "UNKNOWN_HOST", + "GENERIC_SQL_FAILURE", + "CONFLICT", + "UNKNOWN" + ] + }, + "DataSourceList":{ + "type":"list", + "member":{"shape":"DataSource"} + }, + "DataSourceParameters":{ + "type":"structure", + "members":{ + "AmazonElasticsearchParameters":{"shape":"AmazonElasticsearchParameters"}, + "AthenaParameters":{"shape":"AthenaParameters"}, + "AuroraParameters":{"shape":"AuroraParameters"}, + "AuroraPostgreSqlParameters":{"shape":"AuroraPostgreSqlParameters"}, + "AwsIotAnalyticsParameters":{"shape":"AwsIotAnalyticsParameters"}, + "JiraParameters":{"shape":"JiraParameters"}, + "MariaDbParameters":{"shape":"MariaDbParameters"}, + "MySqlParameters":{"shape":"MySqlParameters"}, + "PostgreSqlParameters":{"shape":"PostgreSqlParameters"}, + "PrestoParameters":{"shape":"PrestoParameters"}, + "RdsParameters":{"shape":"RdsParameters"}, + "RedshiftParameters":{"shape":"RedshiftParameters"}, + "S3Parameters":{"shape":"S3Parameters"}, + "ServiceNowParameters":{"shape":"ServiceNowParameters"}, + "SnowflakeParameters":{"shape":"SnowflakeParameters"}, + "SparkParameters":{"shape":"SparkParameters"}, + "SqlServerParameters":{"shape":"SqlServerParameters"}, + "TeradataParameters":{"shape":"TeradataParameters"}, + "TwitterParameters":{"shape":"TwitterParameters"} + } + }, + "DataSourceType":{ + "type":"string", + "enum":[ + "ADOBE_ANALYTICS", + "AMAZON_ELASTICSEARCH", + "ATHENA", + "AURORA", + "AURORA_POSTGRESQL", + "AWS_IOT_ANALYTICS", + "GITHUB", + "JIRA", + "MARIADB", + "MYSQL", + "POSTGRESQL", + "PRESTO", + "REDSHIFT", + "S3", + "SALESFORCE", + "SERVICENOW", + "SNOWFLAKE", + "SPARK", + "SQLSERVER", + "TERADATA", + "TWITTER" + ] + }, + "Database":{ + "type":"string", + "max":128, + "min":1 + }, + "DateTimeParameter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"TimestampList"} + } + }, + "DateTimeParameterList":{ + 
"type":"list", + "member":{"shape":"DateTimeParameter"}, + "max":100 + }, + "DecimalParameter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"DoubleList"} + } + }, + "DecimalParameterList":{ + "type":"list", + "member":{"shape":"DecimalParameter"}, + "max":100 + }, + "DeleteDashboardRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + } + } + }, + "DeleteDashboardResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "Arn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "RequestId":{"shape":"String"} + } + }, + "DeleteDataSetRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DeleteDataSetResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteDataSourceRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSourceId" + } + } + }, + "DeleteDataSourceResponse":{ + "type":"structure", 
+ "members":{ + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteGroupMembershipRequest":{ + "type":"structure", + "required":[ + "MemberName", + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "MemberName":{ + "shape":"GroupMemberName", + "location":"uri", + "locationName":"MemberName" + }, + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteGroupMembershipResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteGroupResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteIAMPolicyAssignmentRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssignmentName", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentName":{ + "shape":"IAMPolicyAssignmentName", + "location":"uri", + "locationName":"AssignmentName" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + 
"DeleteIAMPolicyAssignmentResponse":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteTemplateAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + } + } + }, + "DeleteTemplateAliasResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "AliasName":{"shape":"AliasName"}, + "Arn":{"shape":"Arn"}, + "RequestId":{"shape":"String"} + } + }, + "DeleteTemplateRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + } + } + }, + "DeleteTemplateResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Arn":{"shape":"Arn"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteUserByPrincipalIdRequest":{ + "type":"structure", + "required":[ + "PrincipalId", + "AwsAccountId", + "Namespace" + ], + "members":{ + "PrincipalId":{ + "shape":"String", + "location":"uri", + "locationName":"PrincipalId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + 
"locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteUserByPrincipalIdResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteUserRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteUserResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "Delimiter":{ + "type":"string", + "max":1, + "min":1 + }, + "DescribeDashboardPermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + } + } + }, + "DescribeDashboardPermissionsResponse":{ + "type":"structure", + "members":{ + "DashboardId":{"shape":"RestrictiveResourceId"}, + "DashboardArn":{"shape":"Arn"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "DescribeDashboardRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "VersionNumber":{ 
+ "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + }, + "AliasName":{ + "shape":"AliasName", + "location":"querystring", + "locationName":"alias-name" + } + } + }, + "DescribeDashboardResponse":{ + "type":"structure", + "members":{ + "Dashboard":{"shape":"Dashboard"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "DescribeDataSetPermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DescribeDataSetPermissionsResponse":{ + "type":"structure", + "members":{ + "DataSetArn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeDataSetRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DescribeDataSetResponse":{ + "type":"structure", + "members":{ + "DataSet":{"shape":"DataSet"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeDataSourcePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSourceId" + } + } + }, + "DescribeDataSourcePermissionsResponse":{ + "type":"structure", 
+ "members":{ + "DataSourceArn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeDataSourceRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSourceId" + } + } + }, + "DescribeDataSourceResponse":{ + "type":"structure", + "members":{ + "DataSource":{"shape":"DataSource"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeGroupResponse":{ + "type":"structure", + "members":{ + "Group":{"shape":"Group"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeIAMPolicyAssignmentRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssignmentName", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentName":{ + "shape":"IAMPolicyAssignmentName", + "location":"uri", + "locationName":"AssignmentName" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeIAMPolicyAssignmentResponse":{ + "type":"structure", + "members":{ + 
"IAMPolicyAssignment":{"shape":"IAMPolicyAssignment"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeIngestionRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "IngestionId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "IngestionId":{ + "shape":"IngestionId", + "location":"uri", + "locationName":"IngestionId" + } + } + }, + "DescribeIngestionResponse":{ + "type":"structure", + "members":{ + "Ingestion":{"shape":"Ingestion"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeTemplateAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + } + } + }, + "DescribeTemplateAliasResponse":{ + "type":"structure", + "members":{ + "TemplateAlias":{"shape":"TemplateAlias"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "DescribeTemplatePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + } + } + }, + "DescribeTemplatePermissionsResponse":{ + "type":"structure", + "members":{ + "TemplateId":{"shape":"RestrictiveResourceId"}, + "TemplateArn":{"shape":"Arn"}, 
+ "Permissions":{"shape":"ResourcePermissionList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeTemplateRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + }, + "AliasName":{ + "shape":"AliasName", + "location":"querystring", + "locationName":"alias-name" + } + } + }, + "DescribeTemplateResponse":{ + "type":"structure", + "members":{ + "Template":{"shape":"Template"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeUserRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "Domain":{ + "type":"string", + "max":64, + "min":1 + }, + "DomainNotWhitelistedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "Double":{"type":"double"}, + "DoubleList":{ + "type":"list", + "member":{"shape":"Double"} + }, + "EmbeddingUrl":{ + "type":"string", + "sensitive":true + }, + "ErrorInfo":{ + "type":"structure", + "members":{ + 
"Type":{"shape":"IngestionErrorType"}, + "Message":{"shape":"string"} + } + }, + "ExceptionResourceType":{ + "type":"string", + "enum":[ + "USER", + "GROUP", + "NAMESPACE", + "ACCOUNT_SETTINGS", + "IAMPOLICY_ASSIGNMENT", + "DATA_SOURCE", + "DATA_SET", + "VPC_CONNECTION", + "INGESTION" + ] + }, + "ExportToCSVOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{"shape":"DashboardBehavior"} + } + }, + "Expression":{ + "type":"string", + "max":4096, + "min":1 + }, + "FileFormat":{ + "type":"string", + "enum":[ + "CSV", + "TSV", + "CLF", + "ELF", + "XLSX", + "JSON" + ] + }, + "FilterOperation":{ + "type":"structure", + "required":["ConditionExpression"], + "members":{ + "ConditionExpression":{"shape":"Expression"} + } + }, + "GeoSpatialColumnGroup":{ + "type":"structure", + "required":[ + "Name", + "CountryCode", + "Columns" + ], + "members":{ + "Name":{"shape":"ColumnGroupName"}, + "CountryCode":{"shape":"GeoSpatialCountryCode"}, + "Columns":{"shape":"ColumnList"} + } + }, + "GeoSpatialCountryCode":{ + "type":"string", + "enum":["US"] + }, + "GeoSpatialDataRole":{ + "type":"string", + "enum":[ + "COUNTRY", + "STATE", + "COUNTY", + "CITY", + "POSTCODE", + "LONGITUDE", + "LATITUDE" + ] + }, + "GetDashboardEmbedUrlRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "IdentityType" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "IdentityType":{ + "shape":"IdentityType", + "location":"querystring", + "locationName":"creds-type" + }, + "SessionLifetimeInMinutes":{ + "shape":"SessionLifetimeInMinutes", + "location":"querystring", + "locationName":"session-lifetime" + }, + "UndoRedoDisabled":{ + "shape":"boolean", + "location":"querystring", + "locationName":"undo-redo-disabled" + }, + "ResetDisabled":{ + "shape":"boolean", + 
"location":"querystring", + "locationName":"reset-disabled" + }, + "UserArn":{ + "shape":"Arn", + "location":"querystring", + "locationName":"user-arn" + } + } + }, + "GetDashboardEmbedUrlResponse":{ + "type":"structure", + "members":{ + "EmbedUrl":{"shape":"EmbeddingUrl"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "Group":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"GroupDescription"}, + "PrincipalId":{"shape":"String"} + } + }, + "GroupDescription":{ + "type":"string", + "max":512, + "min":1 + }, + "GroupList":{ + "type":"list", + "member":{"shape":"Group"} + }, + "GroupMember":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "MemberName":{"shape":"GroupMemberName"} + } + }, + "GroupMemberList":{ + "type":"list", + "member":{"shape":"GroupMember"} + }, + "GroupMemberName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "GroupName":{ + "type":"string", + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "Host":{ + "type":"string", + "max":256, + "min":1 + }, + "IAMPolicyAssignment":{ + "type":"structure", + "members":{ + "AwsAccountId":{"shape":"AwsAccountId"}, + "AssignmentId":{"shape":"String"}, + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "AssignmentStatus":{"shape":"AssignmentStatus"} + } + }, + "IAMPolicyAssignmentName":{ + "type":"string", + "min":1, + "pattern":"(?=^.{2,256}$)(?!.*\\s)[0-9a-zA-Z-_.:=+@]*$" + }, + "IAMPolicyAssignmentSummary":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentStatus":{"shape":"AssignmentStatus"} + } + }, + "IAMPolicyAssignmentSummaryList":{ + "type":"list", + "member":{"shape":"IAMPolicyAssignmentSummary"} + }, + "IdentityMap":{ + "type":"map", + "key":{"shape":"String"}, + 
"value":{"shape":"IdentityNameList"} + }, + "IdentityName":{ + "type":"string", + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "IdentityNameList":{ + "type":"list", + "member":{"shape":"IdentityName"} + }, + "IdentityType":{ + "type":"string", + "enum":[ + "IAM", + "QUICKSIGHT" + ] + }, + "IdentityTypeNotSupportedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "Ingestion":{ + "type":"structure", + "required":[ + "Arn", + "IngestionStatus", + "CreatedTime" + ], + "members":{ + "Arn":{"shape":"Arn"}, + "IngestionId":{"shape":"IngestionId"}, + "IngestionStatus":{"shape":"IngestionStatus"}, + "ErrorInfo":{"shape":"ErrorInfo"}, + "RowInfo":{"shape":"RowInfo"}, + "QueueInfo":{"shape":"QueueInfo"}, + "CreatedTime":{"shape":"timestamp"}, + "IngestionTimeInSeconds":{ + "shape":"long", + "box":true + }, + "IngestionSizeInBytes":{ + "shape":"long", + "box":true + }, + "RequestSource":{"shape":"IngestionRequestSource"}, + "RequestType":{"shape":"IngestionRequestType"} + } + }, + "IngestionErrorType":{ + "type":"string", + "enum":[ + "FAILURE_TO_ASSUME_ROLE", + "INGESTION_SUPERSEDED", + "INGESTION_CANCELED", + "DATA_SET_DELETED", + "DATA_SET_NOT_SPICE", + "S3_UPLOADED_FILE_DELETED", + "S3_MANIFEST_ERROR", + "DATA_TOLERANCE_EXCEPTION", + "SPICE_TABLE_NOT_FOUND", + "DATA_SET_SIZE_LIMIT_EXCEEDED", + "ROW_SIZE_LIMIT_EXCEEDED", + "ACCOUNT_CAPACITY_LIMIT_EXCEEDED", + "CUSTOMER_ERROR", + "DATA_SOURCE_NOT_FOUND", + "IAM_ROLE_NOT_AVAILABLE", + "CONNECTION_FAILURE", + "SQL_TABLE_NOT_FOUND", + "PERMISSION_DENIED", + "SSL_CERTIFICATE_VALIDATION_FAILURE", + "OAUTH_TOKEN_FAILURE", + "SOURCE_API_LIMIT_EXCEEDED_FAILURE", + "PASSWORD_AUTHENTICATION_FAILURE", + "SQL_SCHEMA_MISMATCH_ERROR", + "INVALID_DATE_FORMAT", + "INVALID_DATAPREP_SYNTAX", + "SOURCE_RESOURCE_LIMIT_EXCEEDED", + "SQL_INVALID_PARAMETER_VALUE", + "QUERY_TIMEOUT", + "SQL_NUMERIC_OVERFLOW", + 
"UNRESOLVABLE_HOST", + "UNROUTABLE_HOST", + "SQL_EXCEPTION", + "S3_FILE_INACCESSIBLE", + "IOT_FILE_NOT_FOUND", + "IOT_DATA_SET_FILE_EMPTY", + "INVALID_DATA_SOURCE_CONFIG", + "DATA_SOURCE_AUTH_FAILED", + "DATA_SOURCE_CONNECTION_FAILED", + "FAILURE_TO_PROCESS_JSON_FILE", + "INTERNAL_SERVICE_ERROR" + ] + }, + "IngestionId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-_]+$" + }, + "IngestionMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "IngestionRequestSource":{ + "type":"string", + "enum":[ + "MANUAL", + "SCHEDULED" + ] + }, + "IngestionRequestType":{ + "type":"string", + "enum":[ + "INITIAL_INGESTION", + "EDIT", + "INCREMENTAL_REFRESH", + "FULL_REFRESH" + ] + }, + "IngestionStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "QUEUED", + "RUNNING", + "FAILED", + "COMPLETED", + "CANCELLED" + ] + }, + "Ingestions":{ + "type":"list", + "member":{"shape":"Ingestion"} + }, + "InputColumn":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{"shape":"ColumnName"}, + "Type":{"shape":"InputColumnDataType"} + } + }, + "InputColumnDataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DECIMAL", + "DATETIME", + "BIT", + "BOOLEAN", + "JSON" + ] + }, + "InputColumnList":{ + "type":"list", + "member":{"shape":"InputColumn"}, + "max":2048, + "min":1 + }, + "InstanceId":{ + "type":"string", + "max":64, + "min":1 + }, + "IntegerParameter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"LongList"} + } + }, + "IntegerParameterList":{ + "type":"list", + "member":{"shape":"IntegerParameter"}, + "max":100 + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + 
"Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "JiraParameters":{ + "type":"structure", + "required":["SiteBaseUrl"], + "members":{ + "SiteBaseUrl":{"shape":"SiteBaseUrl"} + } + }, + "JoinInstruction":{ + "type":"structure", + "required":[ + "LeftOperand", + "RightOperand", + "Type", + "OnClause" + ], + "members":{ + "LeftOperand":{"shape":"LogicalTableId"}, + "RightOperand":{"shape":"LogicalTableId"}, + "Type":{"shape":"JoinType"}, + "OnClause":{"shape":"OnClause"} + } + }, + "JoinType":{ + "type":"string", + "enum":[ + "INNER", + "OUTER", + "LEFT", + "RIGHT" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ListDashboardVersionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDashboardVersionsResponse":{ + "type":"structure", + "members":{ + "DashboardVersionSummaryList":{"shape":"DashboardVersionSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListDashboardsRequest":{ + 
"type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDashboardsResponse":{ + "type":"structure", + "members":{ + "DashboardSummaryList":{"shape":"DashboardSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListDataSetsRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDataSetsResponse":{ + "type":"structure", + "members":{ + "DataSetSummaries":{"shape":"DataSetSummaryList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListDataSourcesRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDataSourcesResponse":{ + "type":"structure", + "members":{ + "DataSources":{"shape":"DataSourceList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + 
"location":"statusCode" + } + } + }, + "ListGroupMembershipsRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListGroupMembershipsResponse":{ + "type":"structure", + "members":{ + "GroupMemberList":{"shape":"GroupMemberList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListGroupsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListGroupsResponse":{ + "type":"structure", + "members":{ + "GroupList":{"shape":"GroupList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListIAMPolicyAssignmentsForUserRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "UserName", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "UserName":{ + 
"shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListIAMPolicyAssignmentsForUserResponse":{ + "type":"structure", + "members":{ + "ActiveAssignments":{"shape":"ActiveIAMPolicyAssignmentList"}, + "RequestId":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListIAMPolicyAssignmentsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIAMPolicyAssignmentsResponse":{ + "type":"structure", + "members":{ + "IAMPolicyAssignments":{"shape":"IAMPolicyAssignmentSummaryList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListIngestionsRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId" + ], + "members":{ + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "NextToken":{ + "shape":"string", + "location":"querystring", + "locationName":"next-token" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "MaxResults":{ + 
"shape":"IngestionMaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIngestionsResponse":{ + "type":"structure", + "members":{ + "Ingestions":{"shape":"Ingestions"}, + "NextToken":{"shape":"string"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListTemplateAliasesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-result" + } + } + }, + "ListTemplateAliasesResponse":{ + "type":"structure", + "members":{ + "TemplateAliasList":{"shape":"TemplateAliasList"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "NextToken":{"shape":"String"} + } + }, + "ListTemplateVersionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + 
"locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListTemplateVersionsResponse":{ + "type":"structure", + "members":{ + "TemplateVersionSummaryList":{"shape":"TemplateVersionSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListTemplatesRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-result" + } + } + }, + "ListTemplatesResponse":{ + "type":"structure", + "members":{ + "TemplateSummaryList":{"shape":"TemplateSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListUserGroupsRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListUserGroupsResponse":{ + "type":"structure", + "members":{ + "GroupList":{"shape":"GroupList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + 
"location":"statusCode" + } + } + }, + "ListUsersRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "UserList":{"shape":"UserList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "LogicalTable":{ + "type":"structure", + "required":[ + "Alias", + "Source" + ], + "members":{ + "Alias":{"shape":"LogicalTableAlias"}, + "DataTransforms":{"shape":"TransformOperationList"}, + "Source":{"shape":"LogicalTableSource"} + } + }, + "LogicalTableAlias":{ + "type":"string", + "max":64, + "min":1 + }, + "LogicalTableId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-zA-Z-]*" + }, + "LogicalTableMap":{ + "type":"map", + "key":{"shape":"LogicalTableId"}, + "value":{"shape":"LogicalTable"}, + "max":32, + "min":1 + }, + "LogicalTableSource":{ + "type":"structure", + "members":{ + "JoinInstruction":{"shape":"JoinInstruction"}, + "PhysicalTableId":{"shape":"PhysicalTableId"} + } + }, + "Long":{"type":"long"}, + "LongList":{ + "type":"list", + "member":{"shape":"Long"} + }, + "ManifestFileLocation":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Key":{"shape":"S3Key"} + } + }, + "MariaDbParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + 
"MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "MySqlParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "Namespace":{ + "type":"string", + "max":64, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "NonEmptyString":{ + "type":"string", + "pattern":".*\\S.*" + }, + "OnClause":{ + "type":"string", + "max":512, + "min":1 + }, + "OptionalPort":{ + "type":"integer", + "max":65535, + "min":0 + }, + "OutputColumn":{ + "type":"structure", + "members":{ + "Name":{"shape":"ColumnName"}, + "Type":{"shape":"ColumnDataType"} + } + }, + "OutputColumnList":{ + "type":"list", + "member":{"shape":"OutputColumn"} + }, + "Parameters":{ + "type":"structure", + "members":{ + "StringParameters":{"shape":"StringParameterList"}, + "IntegerParameters":{"shape":"IntegerParameterList"}, + "DecimalParameters":{"shape":"DecimalParameterList"}, + "DateTimeParameters":{"shape":"DateTimeParameterList"} + } + }, + "Password":{ + "type":"string", + "max":1024, + "min":1 + }, + "PhysicalTable":{ + "type":"structure", + "members":{ + "RelationalTable":{"shape":"RelationalTable"}, + "CustomSql":{"shape":"CustomSql"}, + "S3Source":{"shape":"S3Source"} + } + }, + "PhysicalTableId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-zA-Z-]*" + }, + "PhysicalTableMap":{ + "type":"map", + "key":{"shape":"PhysicalTableId"}, + "value":{"shape":"PhysicalTable"}, + "max":16, + "min":1 + }, + "Port":{ + "type":"integer", + "max":65535, + "min":1 + }, + "PositiveInteger":{ + "type":"integer", + "min":1 + }, + "PostgreSqlParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "PreconditionNotMetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + 
"RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "PrestoParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Catalog" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Catalog":{"shape":"Catalog"} + } + }, + "Principal":{ + "type":"string", + "max":256, + "min":1 + }, + "ProjectOperation":{ + "type":"structure", + "required":["ProjectedColumns"], + "members":{ + "ProjectedColumns":{"shape":"ProjectedColumnList"} + } + }, + "ProjectedColumnList":{ + "type":"list", + "member":{"shape":"String"}, + "max":2000, + "min":1 + }, + "Query":{ + "type":"string", + "max":256, + "min":1 + }, + "QueueInfo":{ + "type":"structure", + "required":[ + "WaitingOnIngestion", + "QueuedIngestion" + ], + "members":{ + "WaitingOnIngestion":{"shape":"string"}, + "QueuedIngestion":{"shape":"string"} + } + }, + "QuickSightUserNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RdsParameters":{ + "type":"structure", + "required":[ + "InstanceId", + "Database" + ], + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "Database":{"shape":"Database"} + } + }, + "RedshiftParameters":{ + "type":"structure", + "required":["Database"], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"OptionalPort"}, + "Database":{"shape":"Database"}, + "ClusterId":{"shape":"ClusterId"} + } + }, + "RegisterUserRequest":{ + "type":"structure", + "required":[ + "IdentityType", + "Email", + "UserRole", + "AwsAccountId", + "Namespace" + ], + "members":{ + "IdentityType":{"shape":"IdentityType"}, + "Email":{"shape":"String"}, + "UserRole":{"shape":"UserRole"}, + "IamArn":{"shape":"String"}, + "SessionName":{"shape":"RoleSessionName"}, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + 
"location":"uri", + "locationName":"Namespace" + }, + "UserName":{"shape":"UserName"} + } + }, + "RegisterUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"}, + "UserInvitationUrl":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "RelationalTable":{ + "type":"structure", + "required":[ + "DataSourceArn", + "Name", + "InputColumns" + ], + "members":{ + "DataSourceArn":{"shape":"Arn"}, + "Schema":{"shape":"RelationalTableSchema"}, + "Name":{"shape":"RelationalTableName"}, + "InputColumns":{"shape":"InputColumnList"} + } + }, + "RelationalTableName":{ + "type":"string", + "max":64, + "min":1 + }, + "RelationalTableSchema":{ + "type":"string", + "max":64 + }, + "RenameColumnOperation":{ + "type":"structure", + "required":[ + "ColumnName", + "NewColumnName" + ], + "members":{ + "ColumnName":{"shape":"ColumnName"}, + "NewColumnName":{"shape":"ColumnName"} + } + }, + "ResourceExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceId":{"type":"string"}, + "ResourceName":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePermission":{ + "type":"structure", + "required":[ + "Principal", + "Actions" + ], + "members":{ + "Principal":{"shape":"Principal"}, + "Actions":{"shape":"ActionList"} + } + }, + "ResourcePermissionList":{ + "type":"list", + "member":{"shape":"ResourcePermission"}, + "max":64, + "min":1 + }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "CREATION_IN_PROGRESS", + "CREATION_SUCCESSFUL", + 
"CREATION_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_FAILED" + ] + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":503}, + "exception":true + }, + "RestrictiveResourceId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\w\\-]+" + }, + "RoleSessionName":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[\\w+=.@-]*" + }, + "RowInfo":{ + "type":"structure", + "members":{ + "RowsIngested":{ + "shape":"long", + "box":true + }, + "RowsDropped":{ + "shape":"long", + "box":true + } + } + }, + "RowLevelPermissionDataSet":{ + "type":"structure", + "required":[ + "Arn", + "PermissionPolicy" + ], + "members":{ + "Arn":{"shape":"Arn"}, + "PermissionPolicy":{"shape":"RowLevelPermissionPolicy"} + } + }, + "RowLevelPermissionPolicy":{ + "type":"string", + "enum":[ + "GRANT_ACCESS", + "DENY_ACCESS" + ] + }, + "S3Bucket":{ + "type":"string", + "max":1024, + "min":1 + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":1 + }, + "S3Parameters":{ + "type":"structure", + "required":["ManifestFileLocation"], + "members":{ + "ManifestFileLocation":{"shape":"ManifestFileLocation"} + } + }, + "S3Source":{ + "type":"structure", + "required":[ + "DataSourceArn", + "InputColumns" + ], + "members":{ + "DataSourceArn":{"shape":"Arn"}, + "UploadSettings":{"shape":"UploadSettings"}, + "InputColumns":{"shape":"InputColumnList"} + } + }, + "ServiceNowParameters":{ + "type":"structure", + "required":["SiteBaseUrl"], + "members":{ + "SiteBaseUrl":{"shape":"SiteBaseUrl"} + } + }, + "SessionLifetimeInMinutes":{ + "type":"long", + "max":600, + "min":15 + }, + "SessionLifetimeInMinutesInvalidException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + 
"SheetControlsOption":{ + "type":"structure", + "members":{ + "VisibilityState":{"shape":"DashboardUIState"} + } + }, + "SiteBaseUrl":{ + "type":"string", + "max":1024, + "min":1 + }, + "SnowflakeParameters":{ "type":"structure", + "required":[ + "Host", + "Database", + "Warehouse" + ], "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":401}, - "exception":true + "Host":{"shape":"Host"}, + "Database":{"shape":"Database"}, + "Warehouse":{"shape":"Warehouse"} + } }, - "Arn":{"type":"string"}, - "AwsAccountId":{ + "SparkParameters":{ + "type":"structure", + "required":[ + "Host", + "Port" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"} + } + }, + "SqlQuery":{ "type":"string", - "max":12, - "min":12, - "pattern":"^[0-9]{12}$" + "max":65536, + "min":1 }, - "Boolean":{"type":"boolean"}, - "CreateGroupMembershipRequest":{ + "SqlServerParameters":{ "type":"structure", "required":[ - "MemberName", - "GroupName", - "AwsAccountId", - "Namespace" + "Host", + "Port", + "Database" ], "members":{ - "MemberName":{ - "shape":"GroupMemberName", - "location":"uri", - "locationName":"MemberName" - }, - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} } }, - "CreateGroupMembershipResponse":{ + "SslProperties":{ "type":"structure", "members":{ - "GroupMember":{"shape":"GroupMember"}, - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } + "DisableSsl":{"shape":"Boolean"} } }, - "CreateGroupRequest":{ + "StatusCode":{"type":"integer"}, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + 
"StringParameter":{ "type":"structure", "required":[ - "GroupName", - "AwsAccountId", - "Namespace" + "Name", + "Values" ], "members":{ - "GroupName":{"shape":"GroupName"}, - "Description":{"shape":"GroupDescription"}, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"StringList"} } }, - "CreateGroupResponse":{ + "StringParameterList":{ + "type":"list", + "member":{"shape":"StringParameter"}, + "max":100 + }, + "Tag":{ "type":"structure", + "required":[ + "Key", + "Value" + ], "members":{ - "Group":{"shape":"Group"}, - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} } }, - "DeleteGroupMembershipRequest":{ + "TagColumnOperation":{ "type":"structure", "required":[ - "MemberName", - "GroupName", - "AwsAccountId", - "Namespace" + "ColumnName", + "Tags" ], "members":{ - "MemberName":{ - "shape":"GroupMemberName", - "location":"uri", - "locationName":"MemberName" - }, - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", + "ColumnName":{"shape":"ColumnName"}, + "Tags":{"shape":"ColumnTagList"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", "location":"uri", - "locationName":"AwsAccountId" + "locationName":"ResourceArn" }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Tags":{"shape":"TagList"} } }, 
- "DeleteGroupMembershipResponse":{ + "TagResourceResponse":{ "type":"structure", "members":{ "RequestId":{"shape":"String"}, @@ -441,102 +4682,223 @@ } } }, - "DeleteGroupRequest":{ + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "Template":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "Name":{"shape":"TemplateName"}, + "Version":{"shape":"TemplateVersion"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "CreatedTime":{"shape":"Timestamp"} + } + }, + "TemplateAlias":{ + "type":"structure", + "members":{ + "AliasName":{"shape":"AliasName"}, + "Arn":{"shape":"Arn"}, + "TemplateVersionNumber":{"shape":"VersionNumber"} + } + }, + "TemplateAliasList":{ + "type":"list", + "member":{"shape":"TemplateAlias"}, + "max":100 + }, + "TemplateError":{ + "type":"structure", + "members":{ + "Type":{"shape":"TemplateErrorType"}, + "Message":{"shape":"NonEmptyString"} + } + }, + "TemplateErrorList":{ + "type":"list", + "member":{"shape":"TemplateError"}, + "min":1 + }, + "TemplateErrorType":{ + "type":"string", + "enum":[ + "DATA_SET_NOT_FOUND", + "INTERNAL_FAILURE" + ] + }, + "TemplateName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "TemplateSourceAnalysis":{ "type":"structure", "required":[ - "GroupName", - "AwsAccountId", - "Namespace" + "Arn", + "DataSetReferences" ], "members":{ - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Arn":{"shape":"Arn"}, + "DataSetReferences":{"shape":"DataSetReferenceList"} } }, - "DeleteGroupResponse":{ + "TemplateSourceEntity":{ "type":"structure", "members":{ - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } + 
"SourceAnalysis":{"shape":"TemplateSourceAnalysis"}, + "SourceTemplate":{"shape":"TemplateSourceTemplate"} } }, - "DeleteUserByPrincipalIdRequest":{ + "TemplateSourceTemplate":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{"shape":"Arn"} + } + }, + "TemplateSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "Name":{"shape":"TemplateName"}, + "LatestVersionNumber":{"shape":"VersionNumber"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"} + } + }, + "TemplateSummaryList":{ + "type":"list", + "member":{"shape":"TemplateSummary"}, + "max":100 + }, + "TemplateVersion":{ + "type":"structure", + "members":{ + "CreatedTime":{"shape":"Timestamp"}, + "Errors":{"shape":"TemplateErrorList"}, + "VersionNumber":{"shape":"VersionNumber"}, + "Status":{"shape":"ResourceStatus"}, + "DataSetConfigurations":{"shape":"DataSetConfigurationList"}, + "Description":{"shape":"VersionDescription"}, + "SourceEntityArn":{"shape":"Arn"} + } + }, + "TemplateVersionSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "VersionNumber":{"shape":"VersionNumber"}, + "CreatedTime":{"shape":"Timestamp"}, + "Status":{"shape":"ResourceStatus"}, + "Description":{"shape":"VersionDescription"} + } + }, + "TemplateVersionSummaryList":{ + "type":"list", + "member":{"shape":"TemplateVersionSummary"}, + "max":100 + }, + "TeradataParameters":{ "type":"structure", "required":[ - "PrincipalId", - "AwsAccountId", - "Namespace" + "Host", + "Port", + "Database" ], "members":{ - "PrincipalId":{ - "shape":"String", - "location":"uri", - "locationName":"PrincipalId" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + 
"TextQualifier":{ + "type":"string", + "enum":[ + "DOUBLE_QUOTE", + "SINGLE_QUOTE" + ] + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TimestampList":{ + "type":"list", + "member":{"shape":"Timestamp"} + }, + "TransformOperation":{ + "type":"structure", + "members":{ + "ProjectOperation":{"shape":"ProjectOperation"}, + "FilterOperation":{"shape":"FilterOperation"}, + "CreateColumnsOperation":{"shape":"CreateColumnsOperation"}, + "RenameColumnOperation":{"shape":"RenameColumnOperation"}, + "CastColumnTypeOperation":{"shape":"CastColumnTypeOperation"}, + "TagColumnOperation":{"shape":"TagColumnOperation"} + } + }, + "TransformOperationList":{ + "type":"list", + "member":{"shape":"TransformOperation"}, + "max":2048, + "min":1 + }, + "TwitterParameters":{ + "type":"structure", + "required":[ + "Query", + "MaxRows" + ], + "members":{ + "Query":{"shape":"Query"}, + "MaxRows":{"shape":"PositiveInteger"} } }, - "DeleteUserByPrincipalIdResponse":{ + "TypeCastFormat":{ + "type":"string", + "max":32 + }, + "UnsupportedUserEditionException":{ "type":"structure", "members":{ - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } - } + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true }, - "DeleteUserRequest":{ + "UntagResourceRequest":{ "type":"structure", "required":[ - "UserName", - "AwsAccountId", - "Namespace" + "ResourceArn", + "TagKeys" ], "members":{ - "UserName":{ - "shape":"UserName", - "location":"uri", - "locationName":"UserName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", + "ResourceArn":{ + "shape":"Arn", "location":"uri", - "locationName":"AwsAccountId" + "locationName":"ResourceArn" }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - 
"locationName":"Namespace" + "TagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"keys" } } }, - "DeleteUserResponse":{ + "UntagResourceResponse":{ "type":"structure", "members":{ "RequestId":{"shape":"String"}, @@ -546,35 +4908,33 @@ } } }, - "DescribeGroupRequest":{ + "UpdateDashboardPermissionsRequest":{ "type":"structure", "required":[ - "GroupName", "AwsAccountId", - "Namespace" + "DashboardId" ], "members":{ - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "DashboardId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"DashboardId" + }, + "GrantPermissions":{"shape":"UpdateResourcePermissionList"}, + "RevokePermissions":{"shape":"UpdateResourcePermissionList"} } }, - "DescribeGroupResponse":{ + "UpdateDashboardPermissionsResponse":{ "type":"structure", "members":{ - "Group":{"shape":"Group"}, + "DashboardArn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "Permissions":{"shape":"ResourcePermissionList"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -582,73 +4942,50 @@ } } }, - "DescribeUserRequest":{ + "UpdateDashboardPublishedVersionRequest":{ "type":"structure", "required":[ - "UserName", "AwsAccountId", - "Namespace" + "DashboardId", + "VersionNumber" ], "members":{ - "UserName":{ - "shape":"UserName", - "location":"uri", - "locationName":"UserName" - }, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "DashboardId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" + "locationName":"DashboardId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"uri", + "locationName":"VersionNumber" } } }, - "DescribeUserResponse":{ + 
"UpdateDashboardPublishedVersionResponse":{ "type":"structure", "members":{ - "User":{"shape":"User"}, - "RequestId":{"shape":"String"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "DashboardArn":{"shape":"Arn"}, "Status":{ "shape":"StatusCode", "location":"statusCode" - } - } - }, - "DomainNotWhitelistedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, + }, "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":403}, - "exception":true - }, - "EmbeddingUrl":{ - "type":"string", - "sensitive":true - }, - "ExceptionResourceType":{ - "type":"string", - "enum":[ - "USER", - "GROUP", - "NAMESPACE", - "DATA_SOURCE", - "DATA_SET", - "VPC_CONNECTION", - "INGESTION" - ] + } }, - "GetDashboardEmbedUrlRequest":{ + "UpdateDashboardRequest":{ "type":"structure", "required":[ "AwsAccountId", "DashboardId", - "IdentityType" + "Name", + "SourceEntity" ], "members":{ "AwsAccountId":{ @@ -657,183 +4994,129 @@ "locationName":"AwsAccountId" }, "DashboardId":{ - "shape":"String", + "shape":"RestrictiveResourceId", "location":"uri", "locationName":"DashboardId" }, - "IdentityType":{ - "shape":"IdentityType", - "location":"querystring", - "locationName":"creds-type" - }, - "SessionLifetimeInMinutes":{ - "shape":"SessionLifetimeInMinutes", - "location":"querystring", - "locationName":"session-lifetime" - }, - "UndoRedoDisabled":{ - "shape":"boolean", - "location":"querystring", - "locationName":"undo-redo-disabled" - }, - "ResetDisabled":{ - "shape":"boolean", - "location":"querystring", - "locationName":"reset-disabled" - }, - "UserArn":{ - "shape":"Arn", - "location":"querystring", - "locationName":"user-arn" - } + "Name":{"shape":"DashboardName"}, + "SourceEntity":{"shape":"DashboardSourceEntity"}, + "Parameters":{"shape":"Parameters"}, + "VersionDescription":{"shape":"VersionDescription"}, + "DashboardPublishOptions":{"shape":"DashboardPublishOptions"} } }, - "GetDashboardEmbedUrlResponse":{ + "UpdateDashboardResponse":{ 
"type":"structure", "members":{ - "EmbedUrl":{"shape":"EmbeddingUrl"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - }, + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "Status":{"shape":"StatusCode"}, "RequestId":{"shape":"String"} } }, - "Group":{ + "UpdateDataSetPermissionsRequest":{ "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], "members":{ - "Arn":{"shape":"Arn"}, - "GroupName":{"shape":"GroupName"}, - "Description":{"shape":"GroupDescription"}, - "PrincipalId":{"shape":"String"} + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "GrantPermissions":{"shape":"ResourcePermissionList"}, + "RevokePermissions":{"shape":"ResourcePermissionList"} } }, - "GroupDescription":{ - "type":"string", - "max":512, - "min":1 - }, - "GroupList":{ - "type":"list", - "member":{"shape":"Group"} - }, - "GroupMember":{ + "UpdateDataSetPermissionsResponse":{ "type":"structure", "members":{ - "Arn":{"shape":"Arn"}, - "MemberName":{"shape":"GroupMemberName"} + "DataSetArn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } } }, - "GroupMemberList":{ - "type":"list", - "member":{"shape":"GroupMember"} - }, - "GroupMemberName":{ - "type":"string", - "max":256, - "min":1, - "pattern":"[\\u0020-\\u00FF]+" - }, - "GroupName":{ - "type":"string", - "min":1, - "pattern":"[\\u0020-\\u00FF]+" - }, - "IdentityType":{ - "type":"string", - "enum":[ - "IAM", - "QUICKSIGHT" - ] - }, - "IdentityTypeNotSupportedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":403}, - "exception":true - }, - 
"InternalFailureException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "InvalidNextTokenException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true - }, - "InvalidParameterValueException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true - }, - "LimitExceededException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":409}, - "exception":true - }, - "ListGroupMembershipsRequest":{ + "UpdateDataSetRequest":{ "type":"structure", "required":[ - "GroupName", "AwsAccountId", - "Namespace" + "DataSetId", + "Name", + "PhysicalTableMap", + "ImportMode" ], "members":{ - "GroupName":{ - "shape":"GroupName", + "AwsAccountId":{ + "shape":"AwsAccountId", "location":"uri", - "locationName":"GroupName" - }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" + "locationName":"AwsAccountId" }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" }, + "Name":{"shape":"ResourceName"}, + "PhysicalTableMap":{"shape":"PhysicalTableMap"}, + "LogicalTableMap":{"shape":"LogicalTableMap"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "ColumnGroups":{"shape":"ColumnGroupList"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"} + } + }, + "UpdateDataSetResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + 
"IngestionArn":{"shape":"Arn"}, + "IngestionId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "UpdateDataSourcePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "DataSourceId":{ + "shape":"ResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"DataSourceId" + }, + "GrantPermissions":{"shape":"ResourcePermissionList"}, + "RevokePermissions":{"shape":"ResourcePermissionList"} } }, - "ListGroupMembershipsResponse":{ + "UpdateDataSourcePermissionsResponse":{ "type":"structure", "members":{ - "GroupMemberList":{"shape":"GroupMemberList"}, - "NextToken":{"shape":"String"}, + "DataSourceArn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -841,11 +5124,12 @@ } } }, - "ListGroupsRequest":{ + "UpdateDataSourceRequest":{ "type":"structure", "required":[ "AwsAccountId", - "Namespace" + "DataSourceId", + "Name" ], "members":{ "AwsAccountId":{ @@ -853,29 +5137,24 @@ "location":"uri", "locationName":"AwsAccountId" }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" - }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" - }, - "Namespace":{ - "shape":"Namespace", + "DataSourceId":{ + "shape":"ResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"DataSourceId" + }, + "Name":{"shape":"ResourceName"}, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "Credentials":{"shape":"DataSourceCredentials"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"} } }, - "ListGroupsResponse":{ + 
"UpdateDataSourceResponse":{ "type":"structure", "members":{ - "GroupList":{"shape":"GroupList"}, - "NextToken":{"shape":"String"}, + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "UpdateStatus":{"shape":"ResourceStatus"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -883,19 +5162,20 @@ } } }, - "ListUserGroupsRequest":{ + "UpdateGroupRequest":{ "type":"structure", "required":[ - "UserName", + "GroupName", "AwsAccountId", "Namespace" ], "members":{ - "UserName":{ - "shape":"UserName", + "GroupName":{ + "shape":"GroupName", "location":"uri", - "locationName":"UserName" + "locationName":"GroupName" }, + "Description":{"shape":"GroupDescription"}, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", @@ -905,25 +5185,13 @@ "shape":"Namespace", "location":"uri", "locationName":"Namespace" - }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" - }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" } } }, - "ListUserGroupsResponse":{ + "UpdateGroupResponse":{ "type":"structure", "members":{ - "GroupList":{"shape":"GroupList"}, - "NextToken":{"shape":"String"}, + "Group":{"shape":"Group"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -931,10 +5199,11 @@ } } }, - "ListUsersRequest":{ + "UpdateIAMPolicyAssignmentRequest":{ "type":"structure", "required":[ "AwsAccountId", + "AssignmentName", "Namespace" ], "members":{ @@ -943,29 +5212,29 @@ "location":"uri", "locationName":"AwsAccountId" }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" - }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" + "AssignmentName":{ + "shape":"IAMPolicyAssignmentName", + "location":"uri", + "locationName":"AssignmentName" }, "Namespace":{ "shape":"Namespace", "location":"uri", "locationName":"Namespace" - } + }, 
+ "AssignmentStatus":{"shape":"AssignmentStatus"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"} } }, - "ListUsersResponse":{ + "UpdateIAMPolicyAssignmentResponse":{ "type":"structure", "members":{ - "UserList":{"shape":"UserList"}, - "NextToken":{"shape":"String"}, + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentId":{"shape":"String"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "AssignmentStatus":{"shape":"AssignmentStatus"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -973,66 +5242,77 @@ } } }, - "MaxResults":{ - "type":"integer", + "UpdateResourcePermissionList":{ + "type":"list", + "member":{"shape":"ResourcePermission"}, "max":100, "min":1 }, - "Namespace":{ - "type":"string", - "pattern":"default" - }, - "PreconditionNotMetException":{ + "UpdateTemplateAliasRequest":{ "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName", + "TemplateVersionNumber" + ], "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + }, + "TemplateVersionNumber":{"shape":"VersionNumber"} + } }, - "QuickSightUserNotFoundException":{ + "UpdateTemplateAliasResponse":{ "type":"structure", "members":{ - "Message":{"shape":"String"}, + "TemplateAlias":{"shape":"TemplateAlias"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":404}, - "exception":true + } }, - "RegisterUserRequest":{ + "UpdateTemplatePermissionsRequest":{ "type":"structure", "required":[ - "IdentityType", - "Email", - "UserRole", "AwsAccountId", - 
"Namespace" + "TemplateId" ], "members":{ - "IdentityType":{"shape":"IdentityType"}, - "Email":{"shape":"String"}, - "UserRole":{"shape":"UserRole"}, - "IamArn":{"shape":"String"}, - "SessionName":{"shape":"RoleSessionName"}, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "TemplateId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" + "locationName":"TemplateId" }, - "UserName":{"shape":"UserName"} + "GrantPermissions":{"shape":"UpdateResourcePermissionList"}, + "RevokePermissions":{"shape":"UpdateResourcePermissionList"} } }, - "RegisterUserResponse":{ + "UpdateTemplatePermissionsResponse":{ "type":"structure", "members":{ - "User":{"shape":"User"}, - "UserInvitationUrl":{"shape":"String"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "TemplateArn":{"shape":"Arn"}, + "Permissions":{"shape":"ResourcePermissionList"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -1040,111 +5320,41 @@ } } }, - "ResourceExistsException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":409}, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":404}, - "exception":true - }, - "ResourceUnavailableException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":503}, - "exception":true - }, - "RoleSessionName":{ - "type":"string", - "max":64, - "min":2, - "pattern":"[\\w+=.@-]*" - }, - "SessionLifetimeInMinutes":{ - "type":"long", - "max":600, - "min":15 - }, - 
"SessionLifetimeInMinutesInvalidException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true - }, - "StatusCode":{"type":"integer"}, - "String":{"type":"string"}, - "ThrottlingException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":429}, - "exception":true - }, - "UnsupportedUserEditionException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":403}, - "exception":true - }, - "UpdateGroupRequest":{ + "UpdateTemplateRequest":{ "type":"structure", "required":[ - "GroupName", "AwsAccountId", - "Namespace" + "TemplateId", + "SourceEntity" ], "members":{ - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "Description":{"shape":"GroupDescription"}, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "TemplateId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"TemplateId" + }, + "SourceEntity":{"shape":"TemplateSourceEntity"}, + "VersionDescription":{"shape":"VersionDescription"}, + "Name":{"shape":"TemplateName"} } }, - "UpdateGroupResponse":{ + "UpdateTemplateResponse":{ "type":"structure", "members":{ - "Group":{"shape":"Group"}, - "RequestId":{"shape":"String"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "CreationStatus":{"shape":"ResourceStatus"}, "Status":{ "shape":"StatusCode", "location":"statusCode" - } + }, + "RequestId":{"shape":"String"} } }, "UpdateUserRequest":{ @@ -1187,6 +5397,22 @@ } } }, + "UploadSettings":{ + "type":"structure", + "members":{ + "Format":{"shape":"FileFormat"}, + "StartFromRow":{ + "shape":"PositiveInteger", + 
"box":true + }, + "ContainsHeader":{ + "shape":"Boolean", + "box":true + }, + "TextQualifier":{"shape":"TextQualifier"}, + "Delimiter":{"shape":"Delimiter"} + } + }, "User":{ "type":"structure", "members":{ @@ -1218,6 +5444,39 @@ "RESTRICTED_READER" ] }, - "boolean":{"type":"boolean"} + "Username":{ + "type":"string", + "max":64, + "min":1 + }, + "VersionDescription":{ + "type":"string", + "max":512, + "min":1 + }, + "VersionNumber":{ + "type":"long", + "min":1 + }, + "VpcConnectionProperties":{ + "type":"structure", + "required":["VpcConnectionArn"], + "members":{ + "VpcConnectionArn":{"shape":"Arn"} + } + }, + "Warehouse":{ + "type":"string", + "max":128 + }, + "WorkGroup":{ + "type":"string", + "max":128, + "min":1 + }, + "boolean":{"type":"boolean"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} } } diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index a1565f3929d..9ec2d684d1e 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -2,22 +2,71 @@ "version": "2.0", "service": "Amazon QuickSight is a fully managed, serverless, cloud business intelligence service that makes it easy to extend data and insights to every user in your organization. This API interface reference contains documentation for a programming interface that you can use to manage Amazon QuickSight.
", "operations": { + "CancelIngestion": "Cancels an on-going ingestion of data into SPICE.
", + "CreateDashboard": "Creates a dashboard from a template. To first create a template, see the CreateTemplate API.
A dashboard is an entity in QuickSight which identifies Quicksight reports, created from analyses. QuickSight dashboards are sharable. With the right permissions, you can create scheduled email reports from them. The CreateDashboard
, DescribeDashboard
and ListDashboardsByUser
APIs act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.
CLI syntax:
aws quicksight create-dashboard --cli-input-json file://create-dashboard.json
Creates a dataset.
CLI syntax:
aws quicksight create-data-set \\
--aws-account-id=111122223333 \\
--data-set-id=unique-data-set-id \\
--name='My dataset' \\
--import-mode=SPICE \\
--physical-table-map='{
\"physical-table-id\": {
\"RelationalTable\": {
\"DataSourceArn\": \"arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id\",
\"Name\": \"table1\",
\"InputColumns\": [
{
\"Name\": \"column1\",
\"Type\": \"STRING\"
}
]
}
}'
Creates a data source.
The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id
CLI syntax:
aws quicksight create-data-source \\
--aws-account-id=111122223333 \\
--data-source-id=unique-data-source-id \\
--name='My Data Source' \\
--type=POSTGRESQL \\
--data-source-parameters='{ \"PostgreSqlParameters\": {
\"Host\": \"my-db-host.example.com\",
\"Port\": 1234,
\"Database\": \"my-db\" } }' \\
--credentials='{ \"CredentialPair\": {
\"Username\": \"username\",
\"Password\": \"password\" } }'
Creates an Amazon QuickSight group.
The permissions resource is arn:aws:quicksight:us-east-1:<relevant-aws-account-id>:group/default/<group-name>
.
The response is a group object.
CLI Sample:
aws quicksight create-group --aws-account-id=111122223333 --namespace=default --group-name=\"Sales-Management\" --description=\"Sales Management - Forecasting\"
Adds an Amazon QuickSight user to an Amazon QuickSight group.
The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name>
.
The condition resource is the user name.
The condition key is quicksight:UserName
.
The response is the group member object.
CLI Sample:
aws quicksight create-group-membership --aws-account-id=111122223333 --namespace=default --group-name=Sales --member-name=Pat
Creates an assignment with one specified IAM policy ARN and will be assigned to specified groups or users of QuickSight. Users and groups need to be in the same namespace.
CLI syntax:
aws quicksight create-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=helpAssignment --policy-arn=arn:aws:iam::aws:policy/AdministratorAccess --identities=\"user=user5,engineer123,group=QS-Admin\" --namespace=default --region=us-west-2
Creates and starts a new SPICE ingestion on a dataset
Any ingestions operating on tagged datasets inherit the same tags automatically for use in access-control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags?. Tags will be visible on the tagged dataset, but not on the ingestion resource.
", + "CreateTemplate": "Creates a template from an existing QuickSight analysis or template. The resulting template can be used to create a dashboard.
A template is an entity in QuickSight which encapsulates the metadata required to create an analysis that can be used to create a dashboard. It adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets which follow the same schema that was used to create the source analysis and template.
To create a template from an existing analysis, use the analysis's ARN, aws-account-id
, template-id
, source-entity
, and data-set-references
.
CLI syntax to create a template:
aws quicksight create-template —cli-input-json file://create-template.json
CLI syntax to create a template from another template in the same AWS account:
aws quicksight create-template --aws-account-id 111122223333 --template-id reports_test_template --data-set-references DataSetPlaceholder=reports,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/0dfc789c-81f6-4f4f-b9ac-7db2453eefc8 DataSetPlaceholder=Elblogs,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/f60da323-af68-45db-9016-08e0d1d7ded5 --source-entity SourceAnalysis='{Arn=arn:aws:quicksight:us-west-2:111122223333:analysis/7fb74527-c36d-4be8-8139-ac1be4c97365}'
To create a template from another account’s template, you need to grant cross-account resource permission for DescribeTemplate to the account that contains the template.
You can use a file to pass JSON to the function if you prefer.
", + "CreateTemplateAlias": "Creates a template alias for a template.
CLI syntax:
aws quicksight create-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name PROD —version-number 1
Deletes a dashboard.
CLI syntax:
aws quicksight delete-dashboard --aws-account-id 111122223333 —dashboard-id 123123123
aws quicksight delete-dashboard --aws-account-id 111122223333 —dashboard-id 123123123 —version-number 3
Deletes a dataset.
CLI syntax:
aws quicksight delete-data-set \\
--aws-account-id=111111111111 \\
--data-set-id=unique-data-set-id
Deletes the data source permanently. This action breaks all the datasets that reference the deleted data source.
CLI syntax:
aws quicksight delete-data-source \\
--aws-account-id=111122223333 \\
--data-source-id=unique-data-source-id
Removes a user group from Amazon QuickSight.
The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name>
.
CLI Sample:
aws quicksight delete-group -\\-aws-account-id=111122223333 -\\-namespace=default -\\-group-name=Sales-Management
Removes a user from a group so that the user is no longer a member of the group.
The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name>
.
The condition resource is the user name.
The condition key is quicksight:UserName
.
CLI Sample:
aws quicksight delete-group-membership --aws-account-id=111122223333 --namespace=default --group-name=Sales-Management --member-name=Charlie
Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name>
.
CLI Sample:
aws quicksight delete-user --aws-account-id=111122223333 --namespace=default --user-name=Pat
Deletes a user identified by its principal ID.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name>
.
CLI Sample:
aws quicksight delete-user-by-principal-id --aws-account-id=111122223333 --namespace=default --principal-id=ABCDEFJA26JLI7EUUOEHS
Deletes an existing assignment.
CLI syntax:
aws quicksight delete-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=testtest --region=us-east-1 --namespace=default
Deletes a template.
CLI syntax:
aws quicksight delete-template --aws-account-id 111122223333 —-template-id reports_test_template --version-number 2
aws quicksight delete-template —aws-account-id 111122223333 —template-id reports_test_template —alias-name STAGING
aws quicksight delete-template —aws-account-id 111122223333 —template-id reports_test_template —alias-name ‘\\$LATEST’
aws quicksight delete-template --aws-account-id 111122223333 —-template-id reports_test_template
If the version number, which is an optional field, is not passed, the template (including all the versions) is deleted by the API. If the version number is provided, the specific template version is deleted by the API.
Users can explicitly describe the latest version of the template by passing $LATEST
to the alias-name
parameter. $LATEST
is an internally supported alias, which points to the latest version of the template.
Update template alias of given template.
CLI syntax:
aws quicksight delete-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name 'STAGING'
Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.
CLI Sample:
aws quicksight delete-user --aws-account-id=111122223333 --namespace=default --user-name=Pat
Deletes a user identified by its principal ID.
CLI Sample:
aws quicksight delete-user-by-principal-id --aws-account-id=111122223333 --namespace=default --principal-id=ABCDEFJA26JLI7EUUOEHS
Provides a summary for a dashboard.
CLI syntax:
aws quicksight describe-dashboard --aws-account-id 111122223333 —dashboard-id reports_test_report -version-number 2
aws quicksight describe-dashboard --aws-account-id 111122223333 —dashboard-id reports_test_report -alias-name ‘$PUBLISHED’
Describes read and write permissions on a dashboard.
CLI syntax:
aws quicksight describe-dashboard-permissions --aws-account-id 735340738645 —dashboard-id reports_test_bob_report
Describes a dataset.
CLI syntax:
aws quicksight describe-data-set \\
--aws-account-id=111111111111 \\
--data-set-id=unique-data-set-id
Describes the permissions on a dataset.
The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id
CLI syntax:
aws quicksight describe-data-set-permissions \\
--aws-account-id=111122223333 \\
--data-set-id=unique-data-set-id \\
Describes a data source.
The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id
Describes the resource permissions for a data source.
The permissions resource is aws:quicksight:region:aws-account-id:datasource/data-source-id
Returns an Amazon QuickSight group's description and Amazon Resource Name (ARN).
The permissions resource is arn:aws:quicksight:us-east-1:<relevant-aws-account-id>:group/default/<group-name>
.
The response is the group object.
CLI Sample:
aws quicksight describe-group -\\-aws-account-id=11112222333 -\\-namespace=default -\\-group-name=Sales
Returns information about a user, given the user name.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name>
.
The response is a user object that contains the user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.
CLI Sample:
aws quicksight describe-user --aws-account-id=111122223333 --namespace=default --user-name=Pat
Generates a server-side embeddable URL and authorization code. Before this can work properly, first you need to configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards.
Currently, you can use GetDashboardEmbedURL
only from the server, not from the user’s browser.
CLI Sample:
Assume the role with permissions enabled for actions: quickSight:RegisterUser
and quicksight:GetDashboardEmbedURL
. You can use assume-role, assume-role-with-web-identity, or assume-role-with-saml.
aws sts assume-role --role-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --role-session-name embeddingsession
If the user does not exist in QuickSight, register the user:
aws quicksight register-user --aws-account-id 111122223333 --namespace default --identity-type IAM --iam-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --user-role READER --session-name \"embeddingsession\" --email user123@example.com --region us-east-1
Get the URL for the embedded dashboard
aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type IAM
Describes an existing IAMPolicy Assignment by specified assignment name.
CLI syntax:
aws quicksight describe-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=testtest --namespace=default --region=us-east-1
Describes a SPICE ingestion.
", + "DescribeTemplate": "Describes a template's metadata.
CLI syntax:
aws quicksight describe-template --aws-account-id 111122223333 --template-id reports_test_template
aws quicksight describe-template --aws-account-id 111122223333 --template-id reports_test_template --version-number-2
aws quicksight describe-template --aws-account-id 111122223333 --template-id reports_test_template --alias-name '\\$LATEST'
Users can explicitly describe the latest version of the dashboard by passing $LATEST
to the alias-name
parameter. $LATEST
is an internally supported alias, which points to the latest version of the dashboard.
Describes the template aliases of a template.
CLI syntax:
aws quicksight describe-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name 'STAGING'
Describes read and write permissions on a template.
CLI syntax:
aws quicksight describe-template-permissions —aws-account-id 735340738645 —template-id reports_test_template
Returns information about a user, given the user name.
The response is a user object that contains the user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.
CLI Sample:
aws quicksight describe-user --aws-account-id=111122223333 --namespace=default --user-name=Pat
Generates a server-side embeddable URL and authorization code. Before this can work properly, first you need to configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards.
Currently, you can use GetDashboardEmbedURL
only from the server, not from the user’s browser.
CLI Sample:
Assume the role with permissions enabled for actions: quickSight:RegisterUser
and quicksight:GetDashboardEmbedURL
. You can use assume-role, assume-role-with-web-identity, or assume-role-with-saml.
aws sts assume-role --role-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --role-session-name embeddingsession
If the user does not exist in QuickSight, register the user:
aws quicksight register-user --aws-account-id 111122223333 --namespace default --identity-type IAM --iam-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --user-role READER --session-name \"embeddingsession\" --email user123@example.com --region us-east-1
Get the URL for the embedded dashboard (IAM
identity authentication):
aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type IAM
Get the URL for the embedded dashboard (QUICKSIGHT
identity authentication):
aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type QUICKSIGHT --user-arn arn:aws:quicksight:us-east-1:111122223333:user/default/embedding_quicksight_dashboard_role/embeddingsession
Lists all the versions of the dashboards in the Quicksight subscription.
CLI syntax:
aws quicksight list-template-versions —aws-account-id 111122223333 —template-id reports-test-template
", + "ListDashboards": "Lists dashboards in the AWS account.
CLI syntax:
aws quicksight list-dashboards --aws-account-id 111122223333 --max-results 5 —next-token 'next-10'
Lists all of the datasets belonging to this account in an AWS region.
The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*
CLI syntax: aws quicksight list-data-sets --aws-account-id=111111111111
Lists data sources in current AWS region that belong to this AWS account.
The permissions resource is: arn:aws:quicksight:region:aws-account-id:datasource/*
CLI syntax: aws quicksight list-data-sources --aws-account-id=111122223333
Lists member users in a group.
The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name>
.
The response is a list of group member objects.
CLI Sample:
aws quicksight list-group-memberships -\\-aws-account-id=111122223333 -\\-namespace=default
Lists all user groups in Amazon QuickSight.
The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/*
.
The response is a list of group objects.
CLI Sample:
aws quicksight list-groups -\\-aws-account-id=111122223333 -\\-namespace=default
Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name>
.
The response is one or more group objects.
CLI Sample:
aws quicksight list-user-groups -\\-user-name=Pat -\\-aws-account-id=111122223333 -\\-namespace=default -\\-region=us-east-1
Returns a list of all of the Amazon QuickSight users belonging to this account.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/*
.
The response is a list of user objects, containing each user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.
CLI Sample:
aws quicksight list-users --aws-account-id=111122223333 --namespace=default
Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name>
.
The condition resource is the Amazon Resource Name (ARN) for the IAM user or role, and the session name.
The condition keys are quicksight:IamArn
and quicksight:SessionName
.
CLI Sample:
aws quicksight register-user -\\-aws-account-id=111122223333 -\\-namespace=default -\\-email=pat@example.com -\\-identity-type=IAM -\\-user-role=AUTHOR -\\-iam-arn=arn:aws:iam::111122223333:user/Pat
Lists assignments in current QuickSight account.
CLI syntax:
aws quicksight list-iam-policy-assignments --aws-account-id=111122223333 --max-result=5 --assignment-status=ENABLED --namespace=default --region=us-east-1 --next-token=3
Lists all the assignments and the ARNs for the associated IAM policies assigned to the specified user and the group or groups that the user belongs to.
CLI syntax:
aws quicksight list-iam-policy-assignments-for-user --aws-account-id=111122223333 --user-name=user5 --namespace=default --max-result=6 --region=us-east-1
Lists the history of SPICE ingestions for a dataset.
", + "ListTagsForResource": "Lists the tags assigned to a resource.
CLI syntax:
aws quicksight list-tags-for-resource --resource-arn arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1 --region us-east-1
Lists all the aliases of a template.
CLI syntax:
aws quicksight list-template-aliases --aws-account-id 111122223333 —template-id 'reports_test_template'
Lists all the versions of the templates in the Quicksight account.
CLI syntax:
aws quicksight list-template-versions --aws-account-id 111122223333 --aws-account-id 196359894473 --template-id reports-test-template
", + "ListTemplates": "Lists all the templates in the QuickSight account.
CLI syntax:
aws quicksight list-templates --aws-account-id 111122223333 --max-results 1 —next-token AYADeJuxwOypAndSoOn
Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.
The response is one or more group objects.
CLI Sample:
aws quicksight list-user-groups -\\-user-name=Pat -\\-aws-account-id=111122223333 -\\-namespace=default -\\-region=us-east-1
Returns a list of all of the Amazon QuickSight users belonging to this account.
The response is a list of user objects, containing each user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.
CLI Sample:
aws quicksight list-users --aws-account-id=111122223333 --namespace=default
Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request.
CLI Sample:
aws quicksight register-user -\\-aws-account-id=111122223333 -\\-namespace=default -\\-email=pat@example.com -\\-identity-type=IAM -\\-user-role=AUTHOR -\\-iam-arn=arn:aws:iam::111122223333:user/Pat
Assigns a tag or tags to a resource.
Assigns one or more tags (key-value pairs) to the specified QuickSight resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a resource. QuickSight supports tagging on data-set, data-source, dashboard, template.
Tagging for QuickSight works in a similar way to tagging for other AWS services, except for the following:
You can't use tags to track AWS costs for QuickSight, because QuickSight costs are based on users and SPICE capacity, which aren't taggable resources.
QuickSight doesn't currently support the Tag Editor for AWS Resource Groups.
CLI syntax to tag a resource:
aws quicksight tag-resource --resource-arn arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1 --tags Key=K1,Value=V1 Key=K2,Value=V2 --region us-east-1
Removes a tag or tags from a resource.
CLI syntax:
aws quicksight untag-resource --resource-arn arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1 --tag-keys K1 K2 --region us-east-1
Updates a dashboard in the AWS account.
CLI syntax:
aws quicksight update-dashboard --aws-account-id 111122223333 --dashboard-id 123123123 --dashboard-name \"test-update102\" --source-entity SourceTemplate={Arn=arn:aws:quicksight:us-west-2:111122223333:template/sales-report-template2} --data-set-references DataSetPlaceholder=SalesDataSet,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/0e251aef-9ebf-46e1-b852-eb4fa33c1d3a
aws quicksight update-dashboard --cli-input-json file://update-dashboard.json
Updates read and write permissions on a dashboard.
CLI syntax:
aws quicksight update-dashboard-permissions —cli-input-json file://update-permission.json
A sample update-permissions.json for granting read only permissions:
{ \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"GrantPermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboard\", \"quicksight:ListDashboardVersions\", \"quicksight:DescribeDashboardVersion\", \"quicksight:QueryDashboard\" ] } ] }
A sample update-permissions.json for granting read and write permissions:
{ \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"GrantPermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboard\", \"quicksight:ListDashboardVersions\", \"quicksight:DescribeDashboardVersion\", \"quicksight:QueryDashboard\", \"quicksight:DescribeDashboardPermissions\", \"quicksight:UpdateDashboardPermissions\", \"quicksight:DeleteDashboardVersion\", \"quicksight:DeleteDashboard\", \"quicksight:UpdateDashboard\", \"quicksight:UpdateDashboardPublishedVersion\", ] } ] }
A sample update-permissions.json for revoking write permissions:
{ \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"RevokePermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboardPermissions\", \"quicksight:UpdateDashboardPermissions\", \"quicksight:DeleteDashboardVersion\", \"quicksight:DeleteDashboard\", \"quicksight:UpdateDashboard\", \"quicksight:UpdateDashboardPublishedVersion\", ] } ] }
A sample update-permissions.json for revoking read and write permissions:
{ \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"RevokePermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboard\", \"quicksight:ListDashboardVersions\", \"quicksight:DescribeDashboardVersion\", \"quicksight:QueryDashboard\", \"quicksight:DescribeDashboardPermissions\", \"quicksight:UpdateDashboardPermissions\", \"quicksight:DeleteDashboardVersion\", \"quicksight:DeleteDashboard\", \"quicksight:UpdateDashboard\", \"quicksight:UpdateDashboardPublishedVersion\", ] } ] }
To obtain the principal name of a QuickSight user or group, you can use describe-group or describe-user. For example:
aws quicksight describe-user --aws-account-id 111122223333 --namespace default --user-name user2 --region us-east-1 { \"User\": { \"Arn\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Active\": true, \"Email\": \"user2@example.com\", \"Role\": \"ADMIN\", \"UserName\": \"user2\", \"PrincipalId\": \"federated/iam/abcd2abcdabcdeabc5ab5\" }, \"RequestId\": \"8f74bb31-6291-448a-a71c-a765a44bae31\", \"Status\": 200 }
Updates the published version of a dashboard.
CLI syntax:
aws quicksight update-dashboard-published-version --aws-account-id 111122223333 --dashboard-id dashboard-w1 ---version-number 2
Updates a dataset.
CLI syntax:
aws quicksight update-data-set \\
--aws-account-id=111122223333 \\
--data-set-id=unique-data-set-id \\
--name='My dataset' \\
--import-mode=SPICE \\
--physical-table-map='{
\"physical-table-id\": {
\"RelationalTable\": {
\"DataSourceArn\": \"arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id\",
\"Name\": \"table1\",
\"InputColumns\": [
{
\"Name\": \"column1\",
\"Type\": \"STRING\"
}
]
}
}'
Updates the permissions on a dataset.
The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id
CLI syntax:
aws quicksight update-data-set-permissions \\
--aws-account-id=111122223333 \\
--data-set-id=unique-data-set-id \\
--grant-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user1\",\"Actions\":[\"quicksight:DescribeDataSet\",\"quicksight:DescribeDataSetPermissions\",\"quicksight:PassDataSet\",\"quicksight:ListIngestions\",\"quicksight:DescribeIngestion\"]}]' \\
--revoke-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\",\"Actions\":[\"quicksight:UpdateDataSet\",\"quicksight:DeleteDataSet\",\"quicksight:UpdateDataSetPermissions\",\"quicksight:CreateIngestion\",\"quicksight:CancelIngestion\"]}]'
Updates a data source.
The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id
CLI syntax:
aws quicksight update-data-source \\
--aws-account-id=111122223333 \\
--data-source-id=unique-data-source-id \\
--name='My Data Source' \\
--data-source-parameters='{\"PostgreSqlParameters\":{\"Host\":\"my-db-host.example.com\",\"Port\":1234,\"Database\":\"my-db\"}}' \\
--credentials='{\"CredentialPair\":{\"Username\":\"username\",\"Password\":\"password\"}}'
Updates the permissions to a data source.
The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id
CLI syntax:
aws quicksight update-data-source-permissions \\
--aws-account-id=111122223333 \\
--data-source-id=unique-data-source-id \\
--name='My Data Source' \\
--grant-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user1\",\"Actions\":[\"quicksight:DescribeDataSource\",\"quicksight:DescribeDataSourcePermissions\",\"quicksight:PassDataSource\"]}]' \\
--revoke-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\",\"Actions\":[\"quicksight:UpdateDataSource\",\"quicksight:DeleteDataSource\",\"quicksight:UpdateDataSourcePermissions\"]}]'
Changes a group description.
The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name>
.
The response is a group object.
CLI Sample:
aws quicksight update-group --aws-account-id=111122223333 --namespace=default --group-name=Sales --description=\"Sales BI Dashboards\"
Updates an Amazon QuickSight user.
The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name>
.
The response is a user object that contains the user's Amazon QuickSight user name, email address, active or inactive status in Amazon QuickSight, Amazon QuickSight role, and Amazon Resource Name (ARN).
CLI Sample:
aws quicksight update-user --user-name=Pat --role=ADMIN --email=new_address@amazon.com --aws-account-id=111122223333 --namespace=default --region=us-east-1
Updates an existing assignment. This operation updates only the optional parameter or parameters that are specified in the request.
CLI syntax:
aws quicksight update-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=FullAccessAssignment --assignment-status=DRAFT --policy-arns=arn:aws:iam::aws:policy/AdministratorAccess --identities=\"user=user-1,user-2,group=admin\" --namespace=default --region=us-east-1
Updates a template from an existing QuickSight analysis.
CLI syntax:
aws quicksight update-template --aws-account-id 111122223333 --template-id reports_test_template --data-set-references DataSetPlaceholder=reports,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/c684a204-d134-4c53-a63c-451f72c60c28 DataSetPlaceholder=Elblogs,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/15840b7d-b542-4491-937b-602416b367b3 --source-entity SourceAnalysis='{Arn=arn:aws:quicksight:us-west-2:111122223333:analysis/c5731fe9-4708-4598-8f6d-cf2a70875b6d}'
You can also pass in a json file: aws quicksight update-template --cli-input-json file://create-template.json
Updates the template alias of a template.
CLI syntax:
aws quicksight update-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name STAGING --template-version-number 2
Updates the permissions on a template.
CLI syntax:
aws quicksight describe-template-permissions --aws-account-id 111122223333 --template-id reports_test_template
aws quicksight update-template-permissions --cli-input-json file://update-permissions.json
The structure of update-permissions.json
to add permissions:
{ \"AwsAccountId\": \"111122223333\",
\"DashboardId\": \"reports_test_template\",
\"GrantPermissions\": [
{ \"Principal\": \"arn:aws:quicksight:us-east-1:196359894473:user/default/user3\",
\"Actions\": [
\"quicksight:DescribeTemplate\",
\"quicksight:ListTemplateVersions\"
] } ] }
The structure of update-permissions.json
to revoke permissions:
{ \"AwsAccountId\": \"111122223333\",
\"DashboardId\": \"reports_test_template\",
\"RevokePermissions\": [
{ \"Principal\": \"arn:aws:quicksight:us-east-1:196359894473:user/default/user3\",
\"Actions\": [
\"quicksight:DescribeTemplate\",
\"quicksight:ListTemplateVersions\"
] } ] }
To obtain the principal name of a QuickSight group or user, use describe-group or describe-user. For example:
aws quicksight describe-user
--aws-account-id 111122223333
--namespace default
--user-name user2
--region us-east-1
{
\"User\": {
\"Arn\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\",
\"Active\": true,
\"Email\": \"user2@example.com\",
\"Role\": \"ADMIN\",
\"UserName\": \"user2\",
\"PrincipalId\": \"federated/iam/abcd2abcdabcdeabc5ab5\"
},
\"RequestId\": \"8f74bb31-6291-448a-a71c-a765a44bae31\",
\"Status\": 200
}
Updates an Amazon QuickSight user.
The response is a user object that contains the user's Amazon QuickSight user name, email address, active or inactive status in Amazon QuickSight, Amazon QuickSight role, and Amazon Resource Name (ARN).
CLI Sample:
aws quicksight update-user --user-name=Pat --role=ADMIN --email=new_address@example.com --aws-account-id=111122223333 --namespace=default --region=us-east-1
The action to grant or revoke permissions on. For example, \"quicksight:DescribeDashboard\".
" + } + }, + "ActiveIAMPolicyAssignment": { + "base": "The active IAM policy assignment.
", + "refs": { + "ActiveIAMPolicyAssignmentList$member": null + } + }, + "ActiveIAMPolicyAssignmentList": { + "base": null, + "refs": { + "ListIAMPolicyAssignmentsForUserResponse$ActiveAssignments": "Active assignments for this user.
" + } + }, + "AdHocFilteringOption": { + "base": "Ad hoc filtering option.
", + "refs": { + "DashboardPublishOptions$AdHocFilteringOption": "Ad hoc filtering option.
" + } + }, + "AliasName": { + "base": null, + "refs": { + "CreateTemplateAliasRequest$AliasName": "The name you want to give the template's alias. Alias names can't begin with a $
, which is reserved by QuickSight. Alias names that start with the '$' sign are QuickSight reserved naming and can't be deleted.
The alias of the template. If alias-name is provided, the version that the alias-name points to is deleted. Alias names that start with $
are reserved by QuickSight and can't be deleted.
The name of the alias.
", + "DescribeDashboardRequest$AliasName": "The alias name.
", + "DescribeTemplateAliasRequest$AliasName": "The alias name. $PUBLISHED
is not supported for template.
This is an optional field, when an alias name is provided, the version referenced by the alias is described. Refer to CreateTemplateAlias
to create a template alias. $PUBLISHED
is not supported for template.
The display name of the template alias.
", + "UpdateTemplateAliasRequest$AliasName": "The alias name.
" + } + }, + "AmazonElasticsearchParameters": { + "base": "Amazon Elasticsearch parameters.
", + "refs": { + "DataSourceParameters$AmazonElasticsearchParameters": "Amazon Elasticsearch parameters.
" + } + }, "Arn": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$UserArn": "The Amazon QuickSight user's ARN, for use with QUICKSIGHT
identity type. You can use this for any of the following:
Amazon QuickSight users in your account (readers, authors, or admins)
AD users
Invited non-federated users
Federated IAM users
Federated IAM role-based sessions
The Amazon Resource Name (ARN) for the group.
", - "GroupMember$Arn": "The Amazon Resource Name (ARN) for the group member (user).
", - "User$Arn": "The Amazon Resource Name (ARN) for the user.
" + "ActiveIAMPolicyAssignment$PolicyArn": "The ARN of the resource.
", + "CancelIngestionResponse$Arn": "The Amazon Resource Name (ARN) for the data ingestion.
", + "CreateDashboardResponse$Arn": "The ARN of the dashboard.
", + "CreateDashboardResponse$VersionArn": "The ARN of the dashboard, including the version number of the first version that is created.
", + "CreateDataSetResponse$Arn": "The ARN of the dataset.
", + "CreateDataSetResponse$IngestionArn": "The Amazon Resource Name (ARN) for the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE
", + "CreateDataSourceResponse$Arn": "The ARN of the data source.
", + "CreateIAMPolicyAssignmentRequest$PolicyArn": "An IAM policy ARN that you want to apply to the QuickSight users and groups specified in this assignment.
", + "CreateIAMPolicyAssignmentResponse$PolicyArn": "An IAM policy ARN that is applied to the QuickSight users and groups specified in this assignment.
", + "CreateIngestionResponse$Arn": "The Amazon Resource Name (ARN) for the data ingestion.
", + "CreateTemplateResponse$Arn": "The Amazon Resource Name (ARN) for the template.
", + "CreateTemplateResponse$VersionArn": "The Amazon Resource Name (ARN) for the template, including the version information of the first version.
", + "CustomSql$DataSourceArn": "The ARN of the data source.
", + "Dashboard$Arn": "The Amazon Resource name (ARN) of the resource.
", + "DashboardSourceTemplate$Arn": "The Amazon Resource name (ARN) of the resource.
", + "DashboardSummary$Arn": "The Amazon Resource name (ARN) of the resource.
", + "DashboardVersion$Arn": "The Amazon Resource name (ARN) of the resource.
", + "DashboardVersion$SourceEntityArn": "Source entity ARN.
", + "DashboardVersionSummary$Arn": "The Amazon Resource name (ARN) of the resource.
", + "DashboardVersionSummary$SourceEntityArn": "Source entity ARN.
", + "DataSet$Arn": "The Amazon Resource name (ARN) of the resource.
", + "DataSetReference$DataSetArn": "Dataset ARN.
", + "DataSetSummary$Arn": "The Amazon Resource name (ARN) of the dataset.
", + "DataSource$Arn": "The Amazon Resource name (ARN) of the data source.
", + "DeleteDashboardResponse$Arn": "The ARN of the resource.
", + "DeleteDataSetResponse$Arn": "The ARN of the dataset.
", + "DeleteDataSourceResponse$Arn": "The ARN of the data source you deleted.
", + "DeleteTemplateAliasResponse$Arn": "The ARN of the resource.
", + "DeleteTemplateResponse$Arn": "The ARN of the resource.
", + "DescribeDashboardPermissionsResponse$DashboardArn": "The ARN of the dashboard.
", + "DescribeDataSetPermissionsResponse$DataSetArn": "The ARN of the dataset.
", + "DescribeDataSourcePermissionsResponse$DataSourceArn": "The ARN of the data source.
", + "DescribeTemplatePermissionsResponse$TemplateArn": "The ARN of the template.
", + "GetDashboardEmbedUrlRequest$UserArn": "The Amazon QuickSight user's ARN, for use with QUICKSIGHT
identity type. You can use this for any Amazon QuickSight users in your account (readers, authors, or admins) authenticated as one of the following:
Active Directory (AD) users or group members
Invited non-federated users
IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM Federation
The Amazon Resource name (ARN) for the group.
", + "GroupMember$Arn": "The Amazon Resource name (ARN) for the group member (user).
", + "IAMPolicyAssignment$PolicyArn": "Policy ARN.
", + "Ingestion$Arn": "The Amazon Resource name (ARN) of the resource.
", + "ListTagsForResourceRequest$ResourceArn": "The ARN of the resource you want a list of tags for.
", + "RelationalTable$DataSourceArn": "Data source ARN.
", + "RowLevelPermissionDataSet$Arn": "The Amazon Resource name (ARN) of the permission dataset.
", + "S3Source$DataSourceArn": "Data source ARN.
", + "TagResourceRequest$ResourceArn": "The ARN of the resource you want to tag.
", + "Template$Arn": "The ARN of the template.
", + "TemplateAlias$Arn": "The ARN of the template alias.
", + "TemplateSourceAnalysis$Arn": "The Amazon Resource name (ARN) of the resource.
", + "TemplateSourceTemplate$Arn": "The Amazon Resource name (ARN) of the resource.
", + "TemplateSummary$Arn": "A summary of a template.
", + "TemplateVersion$SourceEntityArn": "The ARN of the analysis or template which was used to create this template.
", + "TemplateVersionSummary$Arn": "The ARN of the template version.
", + "UntagResourceRequest$ResourceArn": "The ARN of the resource you to untag.
", + "UpdateDashboardPermissionsResponse$DashboardArn": "The ARN of the dashboard.
", + "UpdateDashboardPublishedVersionResponse$DashboardArn": "The ARN of the dashboard.
", + "UpdateDashboardResponse$Arn": "The ARN of the resource.
", + "UpdateDashboardResponse$VersionArn": "The ARN of the dashboard, including the version number.
", + "UpdateDataSetPermissionsResponse$DataSetArn": "The ARN of the dataset.
", + "UpdateDataSetResponse$Arn": "The ARN of the dataset.
", + "UpdateDataSetResponse$IngestionArn": "The Amazon Resource Name (ARN) for the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE
", + "UpdateDataSourcePermissionsResponse$DataSourceArn": "The ARN of the data source.
", + "UpdateDataSourceResponse$Arn": "The ARN of the data source.
", + "UpdateIAMPolicyAssignmentRequest$PolicyArn": "An IAM policy ARN that will be applied to specified QuickSight users and groups in this assignment.
", + "UpdateIAMPolicyAssignmentResponse$PolicyArn": "The IAM policy ARN assigned to the QuickSight users and groups specified in this request.
", + "UpdateTemplatePermissionsResponse$TemplateArn": "The ARN of the template.
", + "UpdateTemplateResponse$Arn": "The Amazon Resource Name (ARN) for the template.
", + "UpdateTemplateResponse$VersionArn": "The Amazon Resource Name (ARN) for the template, including the version information of the first version.
", + "User$Arn": "The Amazon Resource name (ARN) for the user.
", + "VpcConnectionProperties$VpcConnectionArn": "VPC connection ARN.
" + } + }, + "AssignmentStatus": { + "base": null, + "refs": { + "CreateIAMPolicyAssignmentRequest$AssignmentStatus": "The status of an assignment:
ENABLED - Anything specified in this assignment is used while creating the data source.
DISABLED - This assignment isn't used while creating the data source.
DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.
The status of an assignment:
ENABLED - Anything specified in this assignment is used while creating the data source.
DISABLED - This assignment isn't used while creating the data source.
DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.
Assignment status.
", + "IAMPolicyAssignmentSummary$AssignmentStatus": "Assignment status.
", + "ListIAMPolicyAssignmentsRequest$AssignmentStatus": "The status of the assignment.
", + "UpdateIAMPolicyAssignmentRequest$AssignmentStatus": "The status of an assignment:
ENABLED - Anything specified in this assignment is used while creating the data source.
DISABLED - This assignment isn't used while creating the data source.
DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.
The status of the assignment:
ENABLED - Anything specified in this assignment is used while creating the data source.
DISABLED - This assignment isn't used while creating the data source.
DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.
Athena parameters.
", + "refs": { + "DataSourceParameters$AthenaParameters": "Athena parameters.
" + } + }, + "AuroraParameters": { + "base": "Aurora parameters.
", + "refs": { + "DataSourceParameters$AuroraParameters": "Aurora MySQL parameters.
" + } + }, + "AuroraPostgreSqlParameters": { + "base": "Aurora PostgreSQL parameters.
", + "refs": { + "DataSourceParameters$AuroraPostgreSqlParameters": "Aurora PostgreSQL parameters.
" } }, "AwsAccountId": { "base": null, "refs": { + "CancelIngestionRequest$AwsAccountId": "The AWS account ID.
", + "CreateDashboardRequest$AwsAccountId": "AWS account ID where you want to create the dashboard.
", + "CreateDataSetRequest$AwsAccountId": "The AWS Account ID.
", + "CreateDataSourceRequest$AwsAccountId": "The AWS account ID.
", "CreateGroupMembershipRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "CreateGroupRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "CreateIAMPolicyAssignmentRequest$AwsAccountId": "The AWS Account ID where you want to assign QuickSight users or groups to an IAM policy.
", + "CreateIngestionRequest$AwsAccountId": "The AWS account ID.
", + "CreateTemplateAliasRequest$AwsAccountId": "AWS account ID that contains the template you are aliasing.
", + "CreateTemplateRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "DeleteDashboardRequest$AwsAccountId": "AWS account ID that contains the dashboard you are deleting.
", + "DeleteDataSetRequest$AwsAccountId": "The AWS Account ID.
", + "DeleteDataSourceRequest$AwsAccountId": "The AWS account ID.
", "DeleteGroupMembershipRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "DeleteGroupRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "DeleteIAMPolicyAssignmentRequest$AwsAccountId": "The AWS account ID where you want to delete an IAM policy assignment.
", + "DeleteTemplateAliasRequest$AwsAccountId": "AWS account ID that contains the template alias you are deleting.
", + "DeleteTemplateRequest$AwsAccountId": "AWS account ID that contains the template you are deleting.
", "DeleteUserByPrincipalIdRequest$AwsAccountId": "The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "DeleteUserRequest$AwsAccountId": "The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "DescribeDashboardPermissionsRequest$AwsAccountId": "AWS account ID that contains the dashboard you are describing permissions of.
", + "DescribeDashboardRequest$AwsAccountId": "AWS account ID that contains the dashboard you are describing.
", + "DescribeDataSetPermissionsRequest$AwsAccountId": "The AWS Account ID.
", + "DescribeDataSetRequest$AwsAccountId": "The AWS Account ID.
", + "DescribeDataSourcePermissionsRequest$AwsAccountId": "The AWS account ID.
", + "DescribeDataSourceRequest$AwsAccountId": "The AWS account ID.
", "DescribeGroupRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "DescribeIAMPolicyAssignmentRequest$AwsAccountId": "The AWS account ID that contains the assignment you want to describe.
", + "DescribeIngestionRequest$AwsAccountId": "The AWS account ID.
", + "DescribeTemplateAliasRequest$AwsAccountId": "AWS account ID that contains the template alias you are describing.
", + "DescribeTemplatePermissionsRequest$AwsAccountId": "AWS account ID that contains the template you are describing.
", + "DescribeTemplateRequest$AwsAccountId": "AWS account ID that contains the template you are describing.
", "DescribeUserRequest$AwsAccountId": "The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "GetDashboardEmbedUrlRequest$AwsAccountId": "AWS account ID that contains the dashboard you are embedding.
", + "IAMPolicyAssignment$AwsAccountId": "AWS account ID.
", + "ListDashboardVersionsRequest$AwsAccountId": "AWS account ID that contains the dashboard you are listing.
", + "ListDashboardsRequest$AwsAccountId": "AWS account ID that contains the dashboards you are listing.
", + "ListDataSetsRequest$AwsAccountId": "The AWS Account ID.
", + "ListDataSourcesRequest$AwsAccountId": "The AWS account ID.
", "ListGroupMembershipsRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "ListGroupsRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "ListIAMPolicyAssignmentsForUserRequest$AwsAccountId": "The AWS account ID that contains the assignment.
", + "ListIAMPolicyAssignmentsRequest$AwsAccountId": "The AWS account ID that contains this IAM policy assignment.
", + "ListIngestionsRequest$AwsAccountId": "The AWS account ID.
", + "ListTemplateAliasesRequest$AwsAccountId": "AWS account ID that contains the template aliases you are listing.
", + "ListTemplateVersionsRequest$AwsAccountId": "AWS account ID that contains the templates you are listing.
", + "ListTemplatesRequest$AwsAccountId": "AWS account ID that contains the templates you are listing.
", "ListUserGroupsRequest$AwsAccountId": "The AWS Account ID that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "ListUsersRequest$AwsAccountId": "The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", "RegisterUserRequest$AwsAccountId": "The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "UpdateDashboardPermissionsRequest$AwsAccountId": "AWS account ID that contains the dashboard you are updating.
", + "UpdateDashboardPublishedVersionRequest$AwsAccountId": "AWS account ID that contains the dashboard you are updating.
", + "UpdateDashboardRequest$AwsAccountId": "AWS account ID that contains the dashboard you are updating.
", + "UpdateDataSetPermissionsRequest$AwsAccountId": "The AWS Account ID.
", + "UpdateDataSetRequest$AwsAccountId": "The AWS Account ID.
", + "UpdateDataSourcePermissionsRequest$AwsAccountId": "The AWS account ID.
", + "UpdateDataSourceRequest$AwsAccountId": "The AWS account ID.
", "UpdateGroupRequest$AwsAccountId": "The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
", + "UpdateIAMPolicyAssignmentRequest$AwsAccountId": "The AWS account ID that contains the IAM policy assignment.
", + "UpdateTemplateAliasRequest$AwsAccountId": "AWS account ID that contains the template aliases you are updating.
", + "UpdateTemplatePermissionsRequest$AwsAccountId": "AWS account ID that contains the template.
", + "UpdateTemplateRequest$AwsAccountId": "AWS account ID that contains the template you are updating.
", "UpdateUserRequest$AwsAccountId": "The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.
" } }, + "AwsIotAnalyticsParameters": { + "base": "AWS IoT Analytics parameters.
", + "refs": { + "DataSourceParameters$AwsIotAnalyticsParameters": "AWS IoT Analytics parameters.
" + } + }, "Boolean": { "base": null, "refs": { - "User$Active": "Active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an AD user, that user is inactive until they sign in and provide a password
" + "SslProperties$DisableSsl": "A boolean flag to control whether SSL should be disabled.
", + "UploadSettings$ContainsHeader": "Whether or not the file(s) has a header row.
", + "User$Active": "Active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an AD user, that user is inactive until they sign in and provide a password.
" } }, - "CreateGroupMembershipRequest": { + "CalculatedColumn": { + "base": "A calculated column for a dataset.
", + "refs": { + "CalculatedColumnList$member": null + } + }, + "CalculatedColumnList": { "base": null, "refs": { + "CreateColumnsOperation$Columns": "Calculated columns to create.
" } }, - "CreateGroupMembershipResponse": { + "CancelIngestionRequest": { "base": null, "refs": { } }, - "CreateGroupRequest": { - "base": "The request object for this operation.
", + "CancelIngestionResponse": { + "base": null, "refs": { } }, - "CreateGroupResponse": { - "base": "The response object for this operation.
", + "CastColumnTypeOperation": { + "base": "A transform operation that casts a column to a different type.
", "refs": { + "TransformOperation$CastColumnTypeOperation": "A transform operation that casts a column to a different type.
" } }, - "DeleteGroupMembershipRequest": { + "Catalog": { "base": null, "refs": { + "PrestoParameters$Catalog": "Catalog.
" } }, - "DeleteGroupMembershipResponse": { + "ClusterId": { "base": null, "refs": { + "RedshiftParameters$ClusterId": "Cluster ID. This can be blank if the Host
and Port
are provided.
New column data type.
", + "OutputColumn$Type": "Type.
" } }, - "DeleteGroupResponse": { + "ColumnGroup": { + "base": "Groupings of columns that work together in certain QuickSight features. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.
", + "refs": { + "ColumnGroupList$member": null + } + }, + "ColumnGroupColumnSchema": { + "base": "A structure describing the name, datatype, and geographic role of the columns.
", + "refs": { + "ColumnGroupColumnSchemaList$member": null + } + }, + "ColumnGroupColumnSchemaList": { "base": null, "refs": { + "ColumnGroupSchema$ColumnGroupColumnSchemaList": "A structure containing the list of column group column schemas.
" } }, - "DeleteUserByPrincipalIdRequest": { - "base": "", + "ColumnGroupList": { + "base": null, "refs": { + "CreateDataSetRequest$ColumnGroups": "Groupings of columns that work together in certain QuickSight features. Currently only geospatial hierarchy is supported.
", + "DataSet$ColumnGroups": "Groupings of columns that work together in certain QuickSight features. Currently only geospatial hierarchy is supported.
", + "UpdateDataSetRequest$ColumnGroups": "Groupings of columns that work together in certain QuickSight features. Currently only geospatial hierarchy is supported.
" } }, - "DeleteUserByPrincipalIdResponse": { + "ColumnGroupName": { "base": null, "refs": { + "GeoSpatialColumnGroup$Name": "A display name for the hierarchy.
" } }, - "DeleteUserRequest": { + "ColumnGroupSchema": { + "base": "The column group schema.
", + "refs": { + "ColumnGroupSchemaList$member": null + } + }, + "ColumnGroupSchemaList": { "base": null, "refs": { + "DataSetConfiguration$ColumnGroupSchemaList": "A structure containing the list of column group schemas.
" } }, - "DeleteUserResponse": { + "ColumnId": { "base": null, "refs": { + "CalculatedColumn$ColumnId": "A unique ID to identify a calculated column. During dataset update, if the column ID of a calculated column matches that of an existing calculated column, QuickSight preserves the existing calculated column.
" } }, - "DescribeGroupRequest": { + "ColumnList": { "base": null, "refs": { + "GeoSpatialColumnGroup$Columns": "Columns in this hierarchy.
" } }, - "DescribeGroupResponse": { + "ColumnName": { "base": null, "refs": { + "CalculatedColumn$ColumnName": "Column name.
", + "CastColumnTypeOperation$ColumnName": "Column name.
", + "ColumnList$member": null, + "InputColumn$Name": "The name of this column in the underlying data source.
", + "OutputColumn$Name": "A display name for the dataset.
", + "RenameColumnOperation$ColumnName": "Name of the column to be renamed.
", + "RenameColumnOperation$NewColumnName": "New name for the column.
", + "TagColumnOperation$ColumnName": "The column that this operation acts on.
" } }, - "DescribeUserRequest": { + "ColumnSchema": { + "base": "The column schema.
", + "refs": { + "ColumnSchemaList$member": null + } + }, + "ColumnSchemaList": { "base": null, "refs": { + "DataSetSchema$ColumnSchemaList": "A structure containing the list of column schemas.
" } }, - "DescribeUserResponse": { + "ColumnTag": { + "base": "A tag for a column in a TagColumnOperation. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.
", + "refs": { + "ColumnTagList$member": null + } + }, + "ColumnTagList": { "base": null, "refs": { + "TagColumnOperation$Tags": "The dataset column tag, currently only used for geospatial type tagging. .
This is not tags for the AWS tagging feature. .
The domain specified is not on the allowlist. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.
", + "ConcurrentUpdatingException": { + "base": "A resource is already in an \"actionable\" state that must complete before a new update can be applied.
", "refs": { } }, - "EmbeddingUrl": { + "ConflictException": { + "base": "Updating or deleting a resource can cause an inconsistent state.
", + "refs": { + } + }, + "CreateColumnsOperation": { + "base": "A transform operation that creates calculated columns. Columns created in one such operation form a lexical closure.
", + "refs": { + "TransformOperation$CreateColumnsOperation": "An operation that creates calculated columns. Columns created in one such operation form a lexical closure.
" + } + }, + "CreateDashboardRequest": { "base": null, "refs": { - "GetDashboardEmbedUrlResponse$EmbedUrl": "URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes, and the resulting session is valid for 10 hours. The API provides the URL with an auth_code that enables a single-signon session.
" } }, - "ExceptionResourceType": { + "CreateDashboardResponse": { "base": null, "refs": { - "LimitExceededException$ResourceType": "Limit exceeded.
", - "ResourceExistsException$ResourceType": "The AWS request ID for this request.
", - "ResourceNotFoundException$ResourceType": "The AWS request ID for this request.
", - "ResourceUnavailableException$ResourceType": "The resource type for this request.
" } }, - "GetDashboardEmbedUrlRequest": { + "CreateDataSetRequest": { "base": null, "refs": { } }, - "GetDashboardEmbedUrlResponse": { + "CreateDataSetResponse": { "base": null, "refs": { } }, - "Group": { - "base": "A group in Amazon QuickSight consists of a set of users. You can use groups to make it easier to manage access and security. Currently, an Amazon QuickSight subscription can't contain more than 500 Amazon QuickSight groups.
", + "CreateDataSourceRequest": { + "base": null, "refs": { - "CreateGroupResponse$Group": "The name of the group.
", - "DescribeGroupResponse$Group": "The name of the group.
", - "GroupList$member": null, - "UpdateGroupResponse$Group": "The name of the group.
" } }, - "GroupDescription": { + "CreateDataSourceResponse": { "base": null, "refs": { - "CreateGroupRequest$Description": "A description for the group that you want to create.
", - "Group$Description": "The group description.
", - "UpdateGroupRequest$Description": "The description for the group that you want to update.
" } }, - "GroupList": { + "CreateGroupMembershipRequest": { "base": null, "refs": { - "ListGroupsResponse$GroupList": "The list of the groups.
", - "ListUserGroupsResponse$GroupList": "The list of groups the user is a member of.
" } }, - "GroupMember": { - "base": "A member of an Amazon QuickSight group. Currently, group members must be users. Groups can't be members of another group.
", + "CreateGroupMembershipResponse": { + "base": null, "refs": { - "CreateGroupMembershipResponse$GroupMember": "The group member.
", - "GroupMemberList$member": null } }, - "GroupMemberList": { + "CreateGroupRequest": { + "base": "The request object for this operation.
", + "refs": { + } + }, + "CreateGroupResponse": { + "base": "The response object for this operation.
", + "refs": { + } + }, + "CreateIAMPolicyAssignmentRequest": { "base": null, "refs": { - "ListGroupMembershipsResponse$GroupMemberList": "The list of the members of the group.
" } }, - "GroupMemberName": { + "CreateIAMPolicyAssignmentResponse": { "base": null, "refs": { - "CreateGroupMembershipRequest$MemberName": "The name of the user that you want to add to the group membership.
", - "DeleteGroupMembershipRequest$MemberName": "The name of the user that you want to delete from the group membership.
", - "GroupMember$MemberName": "The name of the group member (user).
" } }, - "GroupName": { + "CreateIngestionRequest": { "base": null, "refs": { - "CreateGroupMembershipRequest$GroupName": "The name of the group that you want to add the user to.
", - "CreateGroupRequest$GroupName": "A name for the group that you want to create.
", - "DeleteGroupMembershipRequest$GroupName": "The name of the group that you want to delete the user from.
", - "DeleteGroupRequest$GroupName": "The name of the group that you want to delete.
", - "DescribeGroupRequest$GroupName": "The name of the group that you want to describe.
", - "Group$GroupName": "The name of the group.
", - "ListGroupMembershipsRequest$GroupName": "The name of the group that you want to see a membership list of.
", - "UpdateGroupRequest$GroupName": "The name of the group that you want to update.
" } }, - "IdentityType": { + "CreateIngestionResponse": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$IdentityType": "The authentication method the user uses to sign in (IAM only).
", - "RegisterUserRequest$IdentityType": "Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:
IAM
: A user whose identity maps to an existing IAM user or role.
QUICKSIGHT
: A user whose identity is owned and managed internally by Amazon QuickSight.
The type of identity authentication used by the user.
" } }, - "IdentityTypeNotSupportedException": { - "base": "The identity type specified is not supported. Supported identity types include IAM and QUICKSIGHT.
", + "CreateTemplateAliasRequest": { + "base": null, "refs": { } }, - "InternalFailureException": { - "base": "An internal failure occurred.
", + "CreateTemplateAliasResponse": { + "base": null, "refs": { } }, - "InvalidNextTokenException": { - "base": "The NextToken
value isn't valid.
One or more parameters don't have a valid value.
", + "CreateTemplateResponse": { + "base": null, "refs": { } }, - "LimitExceededException": { - "base": "A limit is exceeded.
", + "CredentialPair": { + "base": "The combination of username and password that are used as credentials.
", "refs": { + "DataSourceCredentials$CredentialPair": "Credential pair.
" } }, - "ListGroupMembershipsRequest": { + "CustomSql": { + "base": "A physical table type built from the results of the custom SQL query.
", + "refs": { + "PhysicalTable$CustomSql": "A physical table type built from the results of the custom SQL query.
" + } + }, + "CustomSqlName": { "base": null, "refs": { + "CustomSql$Name": "A display name for the SQL query result.
" } }, - "ListGroupMembershipsResponse": { + "Dashboard": { + "base": "Dashboard.
", + "refs": { + "DescribeDashboardResponse$Dashboard": "Information about the dashboard.
" + } + }, + "DashboardBehavior": { "base": null, "refs": { + "AdHocFilteringOption$AvailabilityStatus": "Availability status.
", + "ExportToCSVOption$AvailabilityStatus": "Availability status.
" } }, - "ListGroupsRequest": { + "DashboardError": { + "base": "Dashboard error.
", + "refs": { + "DashboardErrorList$member": null + } + }, + "DashboardErrorList": { "base": null, "refs": { + "DashboardVersion$Errors": "Errors.
" } }, - "ListGroupsResponse": { + "DashboardErrorType": { "base": null, "refs": { + "DashboardError$Type": "Type.
" } }, - "ListUserGroupsRequest": { + "DashboardName": { "base": null, "refs": { + "CreateDashboardRequest$Name": "The display name of the dashboard.
", + "Dashboard$Name": "A display name for the dataset.
", + "DashboardSummary$Name": "A display name for the dataset.
", + "UpdateDashboardRequest$Name": "The display name of the dashboard.
" } }, - "ListUserGroupsResponse": { + "DashboardPublishOptions": { + "base": "Dashboard publish options.
", + "refs": { + "CreateDashboardRequest$DashboardPublishOptions": "Publishing options when creating dashboard.
AvailabilityStatus for AdHocFilteringOption - This can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for AdHoc filtering. Enabled by default.
AvailabilityStatus for ExportToCSVOption - This can be either ENABLED
or DISABLED
. The visual option to export data to CSV is disabled when this is set to DISABLED
. Enabled by default.
VisibilityState for SheetControlsOption - This can be either COLLAPSED
or EXPANDED
. The sheet controls pane is collapsed by default when set to true. Collapsed by default.
Shorthand Syntax:
AdHocFilteringDisabled=boolean,ExportToCSVDisabled=boolean,SheetControlsCollapsed=boolean
Publishing options when creating a dashboard.
AvailabilityStatus for AdHocFilteringOption - This can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for AdHoc filtering. Enabled by default.
AvailabilityStatus for ExportToCSVOption - This can be either ENABLED
or DISABLED
. The visual option to export data to CSV is disabled when this is set to DISABLED
. Enabled by default.
VisibilityState for SheetControlsOption - This can be either COLLAPSED
or EXPANDED
. The sheet controls pane is collapsed by default when set to true. Collapsed by default.
Dashboard source entity.
", + "refs": { + "CreateDashboardRequest$SourceEntity": "Source entity from which the dashboard is created. The souce entity accepts the ARN of the source template or analysis and also references the replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.
If you are creating a dashboard from a source entity in a different AWS account, use the ARN of the source template.
", + "UpdateDashboardRequest$SourceEntity": "The template or analysis from which the dashboard is created. The SouceTemplate entity accepts the Arn of the template and also references to replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.
" + } + }, + "DashboardSourceTemplate": { + "base": "Dashboard source template.
", + "refs": { + "DashboardSourceEntity$SourceTemplate": "Source template.
" + } + }, + "DashboardSummary": { + "base": "Dashboard summary.
", + "refs": { + "DashboardSummaryList$member": null + } + }, + "DashboardSummaryList": { "base": null, "refs": { + "ListDashboardsResponse$DashboardSummaryList": "A structure that contains all of the dashboards shared with the user. Provides basic information about the dashboards.
" } }, - "ListUsersRequest": { + "DashboardUIState": { "base": null, "refs": { + "SheetControlsOption$VisibilityState": "Visibility state.
" } }, - "ListUsersResponse": { + "DashboardVersion": { + "base": "Dashboard version.
", + "refs": { + "Dashboard$Version": "Version.
" + } + }, + "DashboardVersionSummary": { + "base": "Dashboard version summary.
", + "refs": { + "DashboardVersionSummaryList$member": null + } + }, + "DashboardVersionSummaryList": { "base": null, "refs": { + "ListDashboardVersionsResponse$DashboardVersionSummaryList": "A structure that contains information about each version of the dashboard.
" } }, - "MaxResults": { + "DataSet": { + "base": "Dataset.
", + "refs": { + "DescribeDataSetResponse$DataSet": "Information on the dataset.
" + } + }, + "DataSetConfiguration": { + "base": "Dataset configuration.
", + "refs": { + "DataSetConfigurationList$member": null + } + }, + "DataSetConfigurationList": { "base": null, "refs": { - "ListGroupMembershipsRequest$MaxResults": "The maximum number of results to return from this request.
", - "ListGroupsRequest$MaxResults": "The maximum number of results to return.
", - "ListUserGroupsRequest$MaxResults": "The maximum number of results to return from this request.
", - "ListUsersRequest$MaxResults": "The maximum number of results to return from this request.
" + "TemplateVersion$DataSetConfigurations": "Schema of the dataset identified by the placeholder. The idea is that any dashboard created from the template should be bound to new datasets matching the same schema described through this API. .
" } }, - "Namespace": { + "DataSetImportMode": { "base": null, "refs": { - "CreateGroupMembershipRequest$Namespace": "The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
Indicates whether or not you want to import the data into SPICE.
", + "DataSet$ImportMode": "Indicates whether or not you want to import the data into SPICE.
", + "DataSetSummary$ImportMode": "Indicates whether or not you want to import the data into SPICE.
", + "UpdateDataSetRequest$ImportMode": "Indicates whether or not you want to import the data into SPICE.
" } }, - "PreconditionNotMetException": { - "base": "One or more preconditions aren't met.
", + "DataSetName": { + "base": null, "refs": { + "AwsIotAnalyticsParameters$DataSetName": "Dataset name.
" } }, - "QuickSightUserNotFoundException": { - "base": "The user is not found. This error can happen in any operation that requires finding a user based on a provided user name, such as DeleteUser
, DescribeUser
, and so on.
Dataset reference.
", "refs": { + "DataSetReferenceList$member": null } }, - "RegisterUserRequest": { + "DataSetReferenceList": { "base": null, "refs": { + "DashboardSourceTemplate$DataSetReferences": "Dataset references.
", + "TemplateSourceAnalysis$DataSetReferences": "A structure containing information about the dataset references used as placeholders in the template.
" } }, - "RegisterUserResponse": { + "DataSetSchema": { + "base": "Dataset schema.
", + "refs": { + "DataSetConfiguration$DataSetSchema": "Dataset schema.
" + } + }, + "DataSetSummary": { + "base": "Dataset summary.
", + "refs": { + "DataSetSummaryList$member": null + } + }, + "DataSetSummaryList": { "base": null, "refs": { + "ListDataSetsResponse$DataSetSummaries": "The list of dataset summaries.
" } }, - "ResourceExistsException": { - "base": "The resource specified doesn't exist.
", + "DataSource": { + "base": "The structure of a data source.
", "refs": { + "DataSourceList$member": null, + "DescribeDataSourceResponse$DataSource": "The information on the data source.
" } }, - "ResourceNotFoundException": { - "base": "One or more resources can't be found.
", + "DataSourceCredentials": { + "base": "Data source credentials.
", "refs": { + "CreateDataSourceRequest$Credentials": "The credentials QuickSight uses to connect to your underlying source. Currently only username/password based credentials are supported.
", + "UpdateDataSourceRequest$Credentials": "The credentials QuickSight uses to connect to your underlying source. Currently only username/password based credentials are supported.
" } }, - "ResourceUnavailableException": { - "base": "This resource is currently unavailable.
", + "DataSourceErrorInfo": { + "base": "Error information on data source creation or update.
", "refs": { + "DataSource$ErrorInfo": "Error information from the last update or the creation of the data source.
" } }, - "RoleSessionName": { + "DataSourceErrorInfoType": { "base": null, "refs": { - "RegisterUserRequest$SessionName": "You need to use this parameter only when you register one or more users using an assumed IAM role. You don't need to provide the session name for other scenarios, for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role
in the AWS CLI Reference.
Error type.
" } }, - "SessionLifetimeInMinutes": { + "DataSourceList": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$SessionLifetimeInMinutes": "How many minutes the session is valid. The session lifetime must be between 15 and 600 minutes.
" + "ListDataSourcesResponse$DataSources": "A list of data sources.
" } }, - "SessionLifetimeInMinutesInvalidException": { - "base": "The number of minutes specified for the lifetime of a session is not valid. The session lifetime must be from 15 to 600 minutes.
", + "DataSourceParameters": { + "base": "The parameters QuickSight uses to connect to your underlying source. This is a variant type structure. At most one of the attributes should be non-null for this structure to be valid.
", "refs": { + "CreateDataSourceRequest$DataSourceParameters": "The parameters QuickSight uses to connect to your underlying source.
", + "DataSource$DataSourceParameters": "The parameters QuickSight uses to connect to your underlying source. This is a variant type structure. At most one of the attributes should be non-null for this structure to be valid.
", + "UpdateDataSourceRequest$DataSourceParameters": "The parameters QuickSight uses to connect to your underlying source.
" } }, - "StatusCode": { + "DataSourceType": { "base": null, "refs": { - "CreateGroupMembershipResponse$Status": "The http status of the request.
", - "CreateGroupResponse$Status": "The http status of the request.
", - "DeleteGroupMembershipResponse$Status": "The http status of the request.
", - "DeleteGroupResponse$Status": "The http status of the request.
", - "DeleteUserByPrincipalIdResponse$Status": "The http status of the request.
", - "DeleteUserResponse$Status": "The http status of the request.
", - "DescribeGroupResponse$Status": "The http status of the request.
", - "DescribeUserResponse$Status": "The http status of the request.
", - "GetDashboardEmbedUrlResponse$Status": "The http status of the request.
", - "ListGroupMembershipsResponse$Status": "The http status of the request.
", - "ListGroupsResponse$Status": "The http status of the request.
", - "ListUserGroupsResponse$Status": "The HTTP status of the request.
", - "ListUsersResponse$Status": "The http status of the request.
", - "RegisterUserResponse$Status": "The http status of the request.
", - "UpdateGroupResponse$Status": "The http status of the request.
", - "UpdateUserResponse$Status": "The http status of the request.
" + "CreateDataSourceRequest$Type": "The type of the data source. Currently the supported types for this operation are: ATHENA, AURORA, AURORA_POSTGRESQL, MARIADB, MYSQL, POSTGRESQL, PRESTO, REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA
. Use ListDataSources
to return a list of all data sources.
The type of the data source. This indicates which database engine the data source connects to.
" + } + }, + "Database": { + "base": null, + "refs": { + "AuroraParameters$Database": "Database.
", + "AuroraPostgreSqlParameters$Database": "Database.
", + "MariaDbParameters$Database": "Database.
", + "MySqlParameters$Database": "Database.
", + "PostgreSqlParameters$Database": "Database.
", + "RdsParameters$Database": "Database.
", + "RedshiftParameters$Database": "Database.
", + "SnowflakeParameters$Database": "Database.
", + "SqlServerParameters$Database": "Database.
", + "TeradataParameters$Database": "Database.
" + } + }, + "DateTimeParameter": { + "base": "Date time parameter.
", + "refs": { + "DateTimeParameterList$member": null + } + }, + "DateTimeParameterList": { + "base": null, + "refs": { + "Parameters$DateTimeParameters": "DateTime parameters.
" + } + }, + "DecimalParameter": { + "base": "Decimal parameter.
", + "refs": { + "DecimalParameterList$member": null + } + }, + "DecimalParameterList": { + "base": null, + "refs": { + "Parameters$DecimalParameters": "Decimal parameters.
" + } + }, + "DeleteDashboardRequest": { + "base": null, + "refs": { + } + }, + "DeleteDashboardResponse": { + "base": null, + "refs": { + } + }, + "DeleteDataSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteDataSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteDataSourceRequest": { + "base": null, + "refs": { + } + }, + "DeleteDataSourceResponse": { + "base": null, + "refs": { + } + }, + "DeleteGroupMembershipRequest": { + "base": null, + "refs": { + } + }, + "DeleteGroupMembershipResponse": { + "base": null, + "refs": { + } + }, + "DeleteGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteGroupResponse": { + "base": null, + "refs": { + } + }, + "DeleteIAMPolicyAssignmentRequest": { + "base": null, + "refs": { + } + }, + "DeleteIAMPolicyAssignmentResponse": { + "base": null, + "refs": { + } + }, + "DeleteTemplateAliasRequest": { + "base": null, + "refs": { + } + }, + "DeleteTemplateAliasResponse": { + "base": null, + "refs": { + } + }, + "DeleteTemplateRequest": { + "base": null, + "refs": { + } + }, + "DeleteTemplateResponse": { + "base": null, + "refs": { + } + }, + "DeleteUserByPrincipalIdRequest": { + "base": "", + "refs": { + } + }, + "DeleteUserByPrincipalIdResponse": { + "base": null, + "refs": { + } + }, + "DeleteUserRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserResponse": { + "base": null, + "refs": { + } + }, + "Delimiter": { + "base": null, + "refs": { + "UploadSettings$Delimiter": "The delimiter between values in the file.
" + } + }, + "DescribeDashboardPermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDashboardPermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeDashboardRequest": { + "base": null, + "refs": { + } + }, + "DescribeDashboardResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSetPermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSetPermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSetRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSetResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSourcePermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSourcePermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSourceRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSourceResponse": { + "base": null, + "refs": { + } + }, + "DescribeGroupRequest": { + "base": null, + "refs": { + } + }, + "DescribeGroupResponse": { + "base": null, + "refs": { + } + }, + "DescribeIAMPolicyAssignmentRequest": { + "base": null, + "refs": { + } + }, + "DescribeIAMPolicyAssignmentResponse": { + "base": null, + "refs": { + } + }, + "DescribeIngestionRequest": { + "base": null, + "refs": { + } + }, + "DescribeIngestionResponse": { + "base": null, + "refs": { + } + }, + "DescribeTemplateAliasRequest": { + "base": null, + "refs": { + } + }, + "DescribeTemplateAliasResponse": { + "base": null, + "refs": { + } + }, + "DescribeTemplatePermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTemplatePermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeTemplateRequest": { + "base": null, + "refs": { + } + }, + "DescribeTemplateResponse": { + "base": null, + "refs": { + } + }, + "DescribeUserRequest": { + "base": null, + "refs": { + } + }, + "DescribeUserResponse": { + "base": null, + "refs": { + } + }, + "Domain": { + "base": null, + "refs": { + "AmazonElasticsearchParameters$Domain": "The 
Amazon Elasticsearch domain.
" + } + }, + "DomainNotWhitelistedException": { + "base": "The domain specified is not on the allowlist. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.
", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "DoubleList$member": null + } + }, + "DoubleList": { + "base": null, + "refs": { + "DecimalParameter$Values": "Values.
" + } + }, + "EmbeddingUrl": { + "base": null, + "refs": { + "GetDashboardEmbedUrlResponse$EmbedUrl": "URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes, and the resulting session is valid for 10 hours. The API provides the URL with an auth_code that enables a single-signon session.
" + } + }, + "ErrorInfo": { + "base": "Error information on a data set SPICE ingestion.
", + "refs": { + "Ingestion$ErrorInfo": "Error information for this ingestion.
" + } + }, + "ExceptionResourceType": { + "base": null, + "refs": { + "LimitExceededException$ResourceType": "Limit exceeded.
", + "ResourceExistsException$ResourceType": "The AWS request ID for this request.
", + "ResourceNotFoundException$ResourceType": "The AWS request ID for this request.
", + "ResourceUnavailableException$ResourceType": "The resource type for this request.
" + } + }, + "ExportToCSVOption": { + "base": "Export to CSV option.
", + "refs": { + "DashboardPublishOptions$ExportToCSVOption": "Export to CSV option.
" + } + }, + "Expression": { + "base": null, + "refs": { + "CalculatedColumn$Expression": "An expression that defines the calculated column.
", + "FilterOperation$ConditionExpression": "An expression that must evaluate to a boolean value. Rows for which the expression is evaluated to true are kept in the dataset.
" + } + }, + "FileFormat": { + "base": null, + "refs": { + "UploadSettings$Format": "File format.
" + } + }, + "FilterOperation": { + "base": "A transform operation that filters rows based on some condition.
", + "refs": { + "TransformOperation$FilterOperation": "An operation that filters rows based on some condition.
" + } + }, + "GeoSpatialColumnGroup": { + "base": "Geospatial column group that denotes a hierarchy.
", + "refs": { + "ColumnGroup$GeoSpatialColumnGroup": "Geospatial column group that denotes a hierarchy.
" + } + }, + "GeoSpatialCountryCode": { + "base": null, + "refs": { + "GeoSpatialColumnGroup$CountryCode": "Country code.
" + } + }, + "GeoSpatialDataRole": { + "base": null, + "refs": { + "ColumnTag$ColumnGeographicRole": "A geospatial role for a column.
" + } + }, + "GetDashboardEmbedUrlRequest": { + "base": null, + "refs": { + } + }, + "GetDashboardEmbedUrlResponse": { + "base": null, + "refs": { + } + }, + "Group": { + "base": "A group in Amazon QuickSight consists of a set of users. You can use groups to make it easier to manage access and security. Currently, an Amazon QuickSight subscription can't contain more than 500 Amazon QuickSight groups.
", + "refs": { + "CreateGroupResponse$Group": "The name of the group.
", + "DescribeGroupResponse$Group": "The name of the group.
", + "GroupList$member": null, + "UpdateGroupResponse$Group": "The name of the group.
" + } + }, + "GroupDescription": { + "base": null, + "refs": { + "CreateGroupRequest$Description": "A description for the group that you want to create.
", + "Group$Description": "The group description.
", + "UpdateGroupRequest$Description": "The description for the group that you want to update.
" + } + }, + "GroupList": { + "base": null, + "refs": { + "ListGroupsResponse$GroupList": "The list of the groups.
", + "ListUserGroupsResponse$GroupList": "The list of groups the user is a member of.
" + } + }, + "GroupMember": { + "base": "A member of an Amazon QuickSight group. Currently, group members must be users. Groups can't be members of another group. .
", + "refs": { + "CreateGroupMembershipResponse$GroupMember": "The group member.
", + "GroupMemberList$member": null + } + }, + "GroupMemberList": { + "base": null, + "refs": { + "ListGroupMembershipsResponse$GroupMemberList": "The list of the members of the group.
" + } + }, + "GroupMemberName": { + "base": null, + "refs": { + "CreateGroupMembershipRequest$MemberName": "The name of the user that you want to add to the group membership.
", + "DeleteGroupMembershipRequest$MemberName": "The name of the user that you want to delete from the group membership.
", + "GroupMember$MemberName": "The name of the group member (user).
" + } + }, + "GroupName": { + "base": null, + "refs": { + "CreateGroupMembershipRequest$GroupName": "The name of the group that you want to add the user to.
", + "CreateGroupRequest$GroupName": "A name for the group that you want to create.
", + "DeleteGroupMembershipRequest$GroupName": "The name of the group that you want to delete the user from.
", + "DeleteGroupRequest$GroupName": "The name of the group that you want to delete.
", + "DescribeGroupRequest$GroupName": "The name of the group that you want to describe.
", + "Group$GroupName": "The name of the group.
", + "ListGroupMembershipsRequest$GroupName": "The name of the group that you want to see a membership list of.
", + "UpdateGroupRequest$GroupName": "The name of the group that you want to update.
" + } + }, + "Host": { + "base": null, + "refs": { + "AuroraParameters$Host": "Host.
", + "AuroraPostgreSqlParameters$Host": "Host.
", + "MariaDbParameters$Host": "Host.
", + "MySqlParameters$Host": "Host.
", + "PostgreSqlParameters$Host": "Host.
", + "PrestoParameters$Host": "Host.
", + "RedshiftParameters$Host": "Host. This can be blank if the ClusterId
is provided.
Host.
", + "SparkParameters$Host": "Host.
", + "SqlServerParameters$Host": "Host.
", + "TeradataParameters$Host": "Host.
" + } + }, + "IAMPolicyAssignment": { + "base": "IAM policy assignment.
", + "refs": { + "DescribeIAMPolicyAssignmentResponse$IAMPolicyAssignment": "Information describing the IAM policy assignment.
" + } + }, + "IAMPolicyAssignmentName": { + "base": null, + "refs": { + "ActiveIAMPolicyAssignment$AssignmentName": "A name for the IAM policy assignment.
", + "CreateIAMPolicyAssignmentRequest$AssignmentName": "The name of the assignment. It must be unique within an AWS account.
", + "CreateIAMPolicyAssignmentResponse$AssignmentName": "The name of the assignment. Must be unique within an AWS account.
", + "DeleteIAMPolicyAssignmentRequest$AssignmentName": "The name of the assignment.
", + "DeleteIAMPolicyAssignmentResponse$AssignmentName": "The name of the assignment.
", + "DescribeIAMPolicyAssignmentRequest$AssignmentName": "The name of the assignment.
", + "IAMPolicyAssignment$AssignmentName": "Assignment name.
", + "IAMPolicyAssignmentSummary$AssignmentName": "Assignment name.
", + "UpdateIAMPolicyAssignmentRequest$AssignmentName": "The name of the assignment. It must be unique within an AWS account.
", + "UpdateIAMPolicyAssignmentResponse$AssignmentName": "The name of the assignment.
" + } + }, + "IAMPolicyAssignmentSummary": { + "base": "IAM policy assignment Summary.
", + "refs": { + "IAMPolicyAssignmentSummaryList$member": null + } + }, + "IAMPolicyAssignmentSummaryList": { + "base": null, + "refs": { + "ListIAMPolicyAssignmentsResponse$IAMPolicyAssignments": "Information describing the IAM policy assignments.
" + } + }, + "IdentityMap": { + "base": null, + "refs": { + "CreateIAMPolicyAssignmentRequest$Identities": "QuickSight users and/or groups that you want to assign the policy to.
", + "CreateIAMPolicyAssignmentResponse$Identities": "QuickSight users and/or groups that are assigned to the IAM policy.
", + "IAMPolicyAssignment$Identities": "Identities.
", + "UpdateIAMPolicyAssignmentRequest$Identities": "QuickSight users and/or groups that you want to assign to the specified IAM policy.
", + "UpdateIAMPolicyAssignmentResponse$Identities": "QuickSight users and/or groups that are assigned to this IAM policy.
" + } + }, + "IdentityName": { + "base": null, + "refs": { + "IdentityNameList$member": null + } + }, + "IdentityNameList": { + "base": null, + "refs": { + "IdentityMap$value": null + } + }, + "IdentityType": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$IdentityType": "The authentication method the user uses to sign in (IAM only).
", + "RegisterUserRequest$IdentityType": "Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:
IAM
: A user whose identity maps to an existing IAM user or role.
QUICKSIGHT
: A user whose identity is owned and managed internally by Amazon QuickSight.
The type of identity authentication used by the user.
" + } + }, + "IdentityTypeNotSupportedException": { + "base": "The identity type specified is not supported. Supported identity types include IAM and QUICKSIGHT.
", + "refs": { + } + }, + "Ingestion": { + "base": "Information on the SPICE ingestion for a dataset.
", + "refs": { + "DescribeIngestionResponse$Ingestion": "Information about the ingestion.
", + "Ingestions$member": null + } + }, + "IngestionErrorType": { + "base": null, + "refs": { + "ErrorInfo$Type": "Error type.
" + } + }, + "IngestionId": { + "base": null, + "refs": { + "CancelIngestionRequest$IngestionId": "An ID for the ingestion.
", + "CancelIngestionResponse$IngestionId": "An ID for the ingestion.
", + "CreateIngestionRequest$IngestionId": "An ID for the ingestion.
", + "CreateIngestionResponse$IngestionId": "An ID for the ingestion.
", + "DescribeIngestionRequest$IngestionId": "An ID for the ingestion.
", + "Ingestion$IngestionId": "Ingestion ID.
" + } + }, + "IngestionMaxResults": { + "base": null, + "refs": { + "ListIngestionsRequest$MaxResults": "The maximum number of results to be returned per request.
" + } + }, + "IngestionRequestSource": { + "base": null, + "refs": { + "Ingestion$RequestSource": "Event source for this ingestion.
" + } + }, + "IngestionRequestType": { + "base": null, + "refs": { + "Ingestion$RequestType": "Type of this ingestion.
" + } + }, + "IngestionStatus": { + "base": null, + "refs": { + "CreateIngestionResponse$IngestionStatus": "The ingestion status.
", + "Ingestion$IngestionStatus": "Ingestion status.
" + } + }, + "Ingestions": { + "base": null, + "refs": { + "ListIngestionsResponse$Ingestions": "A list of the ingestions.
" + } + }, + "InputColumn": { + "base": "Metadata on a column that is used as the input of a transform operation.
", + "refs": { + "InputColumnList$member": null + } + }, + "InputColumnDataType": { + "base": null, + "refs": { + "InputColumn$Type": "The data type of the column.
" + } + }, + "InputColumnList": { + "base": null, + "refs": { + "CustomSql$Columns": "The column schema from the SQL query result set.
", + "RelationalTable$InputColumns": "The column schema of the table.
", + "S3Source$InputColumns": "A physical table type for as S3 data source.
" + } + }, + "InstanceId": { + "base": null, + "refs": { + "RdsParameters$InstanceId": "Instance ID.
" + } + }, + "IntegerParameter": { + "base": "Integer parameter.
", + "refs": { + "IntegerParameterList$member": null + } + }, + "IntegerParameterList": { + "base": null, + "refs": { + "Parameters$IntegerParameters": "Integer parameters.
" + } + }, + "InternalFailureException": { + "base": "An internal failure occurred.
", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "The NextToken
value isn't valid.
One or more parameters don't have a valid value.
", + "refs": { + } + }, + "JiraParameters": { + "base": "Jira parameters.
", + "refs": { + "DataSourceParameters$JiraParameters": "Jira parameters.
" + } + }, + "JoinInstruction": { + "base": "Join instruction.
", + "refs": { + "LogicalTableSource$JoinInstruction": "Specifies the result of a join of two logical tables.
" + } + }, + "JoinType": { + "base": null, + "refs": { + "JoinInstruction$Type": "Type.
" + } + }, + "LimitExceededException": { + "base": "A limit is exceeded.
", + "refs": { + } + }, + "ListDashboardVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListDashboardVersionsResponse": { + "base": null, + "refs": { + } + }, + "ListDashboardsRequest": { + "base": null, + "refs": { + } + }, + "ListDashboardsResponse": { + "base": null, + "refs": { + } + }, + "ListDataSetsRequest": { + "base": null, + "refs": { + } + }, + "ListDataSetsResponse": { + "base": null, + "refs": { + } + }, + "ListDataSourcesRequest": { + "base": null, + "refs": { + } + }, + "ListDataSourcesResponse": { + "base": null, + "refs": { + } + }, + "ListGroupMembershipsRequest": { + "base": null, + "refs": { + } + }, + "ListGroupMembershipsResponse": { + "base": null, + "refs": { + } + }, + "ListGroupsRequest": { + "base": null, + "refs": { + } + }, + "ListGroupsResponse": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsForUserRequest": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsForUserResponse": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsRequest": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsResponse": { + "base": null, + "refs": { + } + }, + "ListIngestionsRequest": { + "base": null, + "refs": { + } + }, + "ListIngestionsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "ListTemplateAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListTemplateAliasesResponse": { + "base": null, + "refs": { + } + }, + "ListTemplateVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListTemplateVersionsResponse": { + "base": null, + "refs": { + } + }, + "ListTemplatesRequest": { + "base": null, + "refs": { + } + }, + "ListTemplatesResponse": { + "base": null, + "refs": { + } + }, + "ListUserGroupsRequest": { + "base": null, + "refs": { + } + }, + "ListUserGroupsResponse": { + "base": null, + "refs": { + } + }, + 
"ListUsersRequest": { + "base": null, + "refs": { + } + }, + "ListUsersResponse": { + "base": null, + "refs": { + } + }, + "LogicalTable": { + "base": "A unit that joins and that data transformations operate on. A logical table has a source, which can be either a physical table or result of a join. When it points to a physical table, a logical table acts as a mutable copy of that table through transform operations.
", + "refs": { + "LogicalTableMap$value": null + } + }, + "LogicalTableAlias": { + "base": null, + "refs": { + "LogicalTable$Alias": "A display name for the logical table.
" + } + }, + "LogicalTableId": { + "base": null, + "refs": { + "JoinInstruction$LeftOperand": "Left operand.
", + "JoinInstruction$RightOperand": "Right operand.
", + "LogicalTableMap$key": null + } + }, + "LogicalTableMap": { + "base": null, + "refs": { + "CreateDataSetRequest$LogicalTableMap": "Configures the combination and transformation of the data from the physical tables.
", + "DataSet$LogicalTableMap": "Configures the combination and transformation of the data from the physical tables.
", + "UpdateDataSetRequest$LogicalTableMap": "Configures the combination and transformation of the data from the physical tables.
" + } + }, + "LogicalTableSource": { + "base": "Information on the source of a logical table. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.
", + "refs": { + "LogicalTable$Source": "Source of this logical table.
" + } + }, + "Long": { + "base": null, + "refs": { + "DataSet$ConsumedSpiceCapacityInBytes": "The amount of SPICE capacity used by this dataset. This is 0 if the dataset isn't imported into SPICE.
", + "LongList$member": null + } + }, + "LongList": { + "base": null, + "refs": { + "IntegerParameter$Values": "Values.
" + } + }, + "ManifestFileLocation": { + "base": "Amazon S3 manifest file location.
", + "refs": { + "S3Parameters$ManifestFileLocation": "Location of the Amazon S3 manifest file. This is NULL if the manifest file was uploaded in the console.
" + } + }, + "MariaDbParameters": { + "base": "MariaDB parameters.
", + "refs": { + "DataSourceParameters$MariaDbParameters": "MariaDB parameters.
" + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListDashboardVersionsRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListDashboardsRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListDataSetsRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListDataSourcesRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListGroupMembershipsRequest$MaxResults": "The maximum number of results to return from this request.
", + "ListGroupsRequest$MaxResults": "The maximum number of results to return.
", + "ListIAMPolicyAssignmentsForUserRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListIAMPolicyAssignmentsRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListTemplateAliasesRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListTemplateVersionsRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListTemplatesRequest$MaxResults": "The maximum number of results to be returned per request.
", + "ListUserGroupsRequest$MaxResults": "The maximum number of results to return from this request.
", + "ListUsersRequest$MaxResults": "The maximum number of results to return from this request.
" + } + }, + "MySqlParameters": { + "base": "MySQL parameters.
", + "refs": { + "DataSourceParameters$MySqlParameters": "MySQL parameters.
" + } + }, + "Namespace": { + "base": null, + "refs": { + "CreateGroupMembershipRequest$Namespace": "The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace that contains the assignment.
", + "DeleteGroupMembershipRequest$Namespace": "The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace that contains the assignment.
", + "DeleteUserByPrincipalIdRequest$Namespace": "The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace that contains the assignment.
", + "DescribeUserRequest$Namespace": "The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace of the assignment.
", + "ListIAMPolicyAssignmentsRequest$Namespace": "The namespace for this assignment.
", + "ListUserGroupsRequest$Namespace": "The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace of the assignment.
", + "UpdateUserRequest$Namespace": "The namespace. Currently, you should set this to default
.
Message.
", + "DataSetReference$DataSetPlaceholder": "Dataset placeholder.
", + "DateTimeParameter$Name": "A display name for the dataset.
", + "DecimalParameter$Name": "A display name for the dataset.
", + "IntegerParameter$Name": "A display name for the dataset.
", + "StringParameter$Name": "A display name for the dataset.
", + "TemplateError$Message": "Description of the error type.
" + } + }, + "OnClause": { + "base": null, + "refs": { + "JoinInstruction$OnClause": "On Clause.
" + } + }, + "OptionalPort": { + "base": null, + "refs": { + "RedshiftParameters$Port": "Port. This can be blank if the ClusterId
is provided.
Output column.
", + "refs": { + "OutputColumnList$member": null + } + }, + "OutputColumnList": { + "base": null, + "refs": { + "DataSet$OutputColumns": "The list of columns after all transforms. These columns are available in templates, analyses, and dashboards.
" + } + }, + "Parameters": { + "base": "Parameters.
", + "refs": { + "CreateDashboardRequest$Parameters": "A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters and some parameters might accept multiple values. You could use the following structure to override two string parameters that accept multiple values:
", + "UpdateDashboardRequest$Parameters": "A structure that contains the parameters of the dashboard.
" + } + }, + "Password": { + "base": null, + "refs": { + "CredentialPair$Password": "Password.
" + } + }, + "PhysicalTable": { + "base": "A view of a data source. Contains information on the shape of the data in the underlying source. This is a variant type structure. No more than one of the attributes can be non-null for this structure to be valid.
", + "refs": { + "PhysicalTableMap$value": null + } + }, + "PhysicalTableId": { + "base": null, + "refs": { + "LogicalTableSource$PhysicalTableId": "Physical table ID.
", + "PhysicalTableMap$key": null + } + }, + "PhysicalTableMap": { + "base": null, + "refs": { + "CreateDataSetRequest$PhysicalTableMap": "Declares the physical tables that are available in the underlying data sources.
", + "DataSet$PhysicalTableMap": "Declares the physical tables that are available in the underlying data sources.
", + "UpdateDataSetRequest$PhysicalTableMap": "Declares the physical tables that are available in the underlying data sources.
" + } + }, + "Port": { + "base": null, + "refs": { + "AuroraParameters$Port": "Port.
", + "AuroraPostgreSqlParameters$Port": "Port.
", + "MariaDbParameters$Port": "Port.
", + "MySqlParameters$Port": "Port.
", + "PostgreSqlParameters$Port": "Port.
", + "PrestoParameters$Port": "Port.
", + "SparkParameters$Port": "Port.
", + "SqlServerParameters$Port": "Port.
", + "TeradataParameters$Port": "Port.
" + } + }, + "PositiveInteger": { + "base": null, + "refs": { + "TwitterParameters$MaxRows": "Maximum number of rows to query Twitter.
", + "UploadSettings$StartFromRow": "A row number to start reading data from.
" + } + }, + "PostgreSqlParameters": { + "base": "PostgreSQL parameters.
", + "refs": { + "DataSourceParameters$PostgreSqlParameters": "PostgreSQL parameters.
" + } + }, + "PreconditionNotMetException": { + "base": "One or more preconditions aren't met.
", + "refs": { + } + }, + "PrestoParameters": { + "base": "Presto parameters.
", + "refs": { + "DataSourceParameters$PrestoParameters": "Presto parameters.
", + } + }, + "Principal": { + "base": null, + "refs": { + "ResourcePermission$Principal": "The ARN of a QuickSight user or group, or an IAM ARN. If you are using cross-account resource sharing, this is the IAM ARN of an account root. Otherwise, it is the ARN of a QuickSight user or group.
" + } + }, + "ProjectOperation": { + "base": "A transform operation that projects columns. Operations that come after a projection can only refer to projected columns.
", + "refs": { + "TransformOperation$ProjectOperation": "An operation that projects columns. Operations that come after a projection can only refer to projected columns.
" + } + }, + "ProjectedColumnList": { + "base": null, + "refs": { + "ProjectOperation$ProjectedColumns": "Projected columns.
" + } + }, + "Query": { + "base": null, + "refs": { + "TwitterParameters$Query": "Twitter query string.
" + } + }, + "QueueInfo": { + "base": "Information on queued dataset SPICE ingestion.
", + "refs": { + "Ingestion$QueueInfo": null + } + }, + "QuickSightUserNotFoundException": { + "base": "The user is not found. This error can happen in any operation that requires finding a user based on a provided user name, such as DeleteUser
, DescribeUser
, and so on.
RDS parameters.
", + "refs": { + "DataSourceParameters$RdsParameters": "RDS parameters.
" + } + }, + "RedshiftParameters": { + "base": "Redshift parameters. The ClusterId
field can be blank if Host
and Port
are both set, and the other way around.
Redshift parameters.
" + } + }, + "RegisterUserRequest": { + "base": null, + "refs": { + } + }, + "RegisterUserResponse": { + "base": null, + "refs": { + } + }, + "RelationalTable": { + "base": "A physical table type for relational data sources.
", + "refs": { + "PhysicalTable$RelationalTable": "A physical table type for relational data sources.
" + } + }, + "RelationalTableName": { + "base": null, + "refs": { + "RelationalTable$Name": "Name of the relational table.
" + } + }, + "RelationalTableSchema": { + "base": null, + "refs": { + "RelationalTable$Schema": "The schema name. Applies to certain relational database engines.
" + } + }, + "RenameColumnOperation": { + "base": "A transform operation that renames a column.
", + "refs": { + "TransformOperation$RenameColumnOperation": "An operation that renames a column.
" + } + }, + "ResourceExistsException": { + "base": "The resource specified already exists.
", + "refs": { + } + }, + "ResourceId": { + "base": null, + "refs": { + "CreateDataSetRequest$DataSetId": "An ID for the dataset you want to create. This is unique per region per AWS account.
", + "CreateDataSetResponse$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "CreateDataSetResponse$IngestionId": "The ID of the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.
", + "CreateDataSourceRequest$DataSourceId": "An ID for the data source. This is unique per AWS Region per AWS account.
", + "CreateDataSourceResponse$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "DataSet$DataSetId": "The ID of the dataset.
", + "DataSetSummary$DataSetId": "The ID of the dataset.
", + "DataSource$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "DeleteDataSetRequest$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "DeleteDataSetResponse$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "DeleteDataSourceRequest$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "DeleteDataSourceResponse$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "DescribeDataSetPermissionsRequest$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "DescribeDataSetPermissionsResponse$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "DescribeDataSetRequest$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "DescribeDataSourcePermissionsRequest$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "DescribeDataSourcePermissionsResponse$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "DescribeDataSourceRequest$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "UpdateDataSetPermissionsRequest$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "UpdateDataSetPermissionsResponse$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "UpdateDataSetRequest$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "UpdateDataSetResponse$DataSetId": "The ID for the dataset you want to create. This is unique per region per AWS account.
", + "UpdateDataSetResponse$IngestionId": "The ID of the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.
", + "UpdateDataSourcePermissionsRequest$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "UpdateDataSourcePermissionsResponse$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "UpdateDataSourceRequest$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
", + "UpdateDataSourceResponse$DataSourceId": "The ID of the data source. This is unique per AWS Region per AWS account.
" + } + }, + "ResourceName": { + "base": null, + "refs": { + "CreateDataSetRequest$Name": "The display name for the dataset.
", + "CreateDataSourceRequest$Name": "A display name for the data source.
", + "DataSet$Name": "A display name for the dataset.
", + "DataSetSummary$Name": "A display name for the dataset.
", + "DataSource$Name": "A display name for the data source.
", + "UpdateDataSetRequest$Name": "The display name for the dataset.
", + "UpdateDataSourceRequest$Name": "A display name for the data source.
" + } + }, + "ResourceNotFoundException": { + "base": "One or more resources can't be found.
", + "refs": { + } + }, + "ResourcePermission": { + "base": "Permission for the resource.
", + "refs": { + "ResourcePermissionList$member": null, + "UpdateResourcePermissionList$member": null + } + }, + "ResourcePermissionList": { + "base": null, + "refs": { + "CreateDashboardRequest$Permissions": "A structure that contains the permissions of the dashboard. You can use this for granting permissions with principal and action information.
", + "CreateDataSetRequest$Permissions": "A list of resource permissions on the dataset.
", + "CreateDataSourceRequest$Permissions": "A list of resource permissions on the data source.
", + "CreateTemplateRequest$Permissions": "A list of resource permissions to be set on the template. The shorthand syntax should look similar to this: Shorthand Syntax: Principal=string,Actions=string,string ...
A structure that contains the permissions of the dashboard.
", + "DescribeDataSetPermissionsResponse$Permissions": "A list of resource permissions on the dataset.
", + "DescribeDataSourcePermissionsResponse$Permissions": "A list of resource permissions on the data source.
", + "DescribeTemplatePermissionsResponse$Permissions": "A list of resource permissions to be set on the template.
", + "UpdateDashboardPermissionsResponse$Permissions": "Information about the permissions on the dashboard.
", + "UpdateDataSetPermissionsRequest$GrantPermissions": "The resource permissions that you want to grant to the dataset.
", + "UpdateDataSetPermissionsRequest$RevokePermissions": "The resource permissions that you want to revoke from the dataset.
", + "UpdateDataSourcePermissionsRequest$GrantPermissions": "A list of resource permissions that you want to grant on the data source.
", + "UpdateDataSourcePermissionsRequest$RevokePermissions": "A list of resource permissions that you want to revoke on the data source.
", + "UpdateTemplatePermissionsResponse$Permissions": "A list of resource permissions to be set on the template.
" + } + }, + "ResourceStatus": { + "base": null, + "refs": { + "CreateDashboardResponse$CreationStatus": "The creation status of the dashboard create request.
", + "CreateDataSourceResponse$CreationStatus": "The status of creating the data source.
", + "CreateTemplateResponse$CreationStatus": "The template creation status.
", + "DashboardVersion$Status": "The http status of the request.
", + "DashboardVersionSummary$Status": "The http status of the request.
", + "DataSource$Status": "The http status of the request.
", + "TemplateVersion$Status": "The http status of the request.
", + "TemplateVersionSummary$Status": "The status of the template version.
", + "UpdateDashboardResponse$CreationStatus": "The creation status of the request.
", + "UpdateDataSourceResponse$UpdateStatus": "The update status of the data source's last update.
", + "UpdateTemplateResponse$CreationStatus": "The creation status of the template.
" + } + }, + "ResourceUnavailableException": { + "base": "This resource is currently unavailable.
", + "refs": { + } + }, + "RestrictiveResourceId": { + "base": null, + "refs": { + "CreateDashboardRequest$DashboardId": "The ID for the dashboard, also added to IAM policy.
", + "CreateDashboardResponse$DashboardId": "The ID for the dashboard.
", + "CreateTemplateAliasRequest$TemplateId": "An ID for the template.
", + "CreateTemplateRequest$TemplateId": "An ID for the template you want to create. This is unique per AWS region per AWS account.
", + "CreateTemplateResponse$TemplateId": "The ID of the template.
", + "Dashboard$DashboardId": "Dashboard ID.
", + "DashboardSummary$DashboardId": "Dashboard ID.
", + "DeleteDashboardRequest$DashboardId": "The ID for the dashboard.
", + "DeleteDashboardResponse$DashboardId": "The ID of the dashboard.
", + "DeleteTemplateAliasRequest$TemplateId": "An ID for the template.
", + "DeleteTemplateAliasResponse$TemplateId": "An ID for the template.
", + "DeleteTemplateRequest$TemplateId": "An ID for the template you want to delete.
", + "DeleteTemplateResponse$TemplateId": "An ID for the template.
", + "DescribeDashboardPermissionsRequest$DashboardId": "The ID for the dashboard, also added to IAM policy.
", + "DescribeDashboardPermissionsResponse$DashboardId": "The ID for the dashboard.
", + "DescribeDashboardRequest$DashboardId": "The ID for the dashboard.
", + "DescribeTemplateAliasRequest$TemplateId": "An ID for the template.
", + "DescribeTemplatePermissionsRequest$TemplateId": "The ID for the template.
", + "DescribeTemplatePermissionsResponse$TemplateId": "The ID for the template.
", + "DescribeTemplateRequest$TemplateId": "An ID for the template.
", + "GetDashboardEmbedUrlRequest$DashboardId": "The ID for the dashboard, also added to IAM policy
", + "ListDashboardVersionsRequest$DashboardId": "The ID for the dashboard.
", + "ListTemplateAliasesRequest$TemplateId": "The ID for the template.
", + "ListTemplateVersionsRequest$TemplateId": "The ID for the template.
", + "Template$TemplateId": "The ID for the template. This is unique per region per AWS account.
", + "TemplateSummary$TemplateId": "The ID of the template. This is unique per region per AWS account.
", + "UpdateDashboardPermissionsRequest$DashboardId": "The ID for the dashboard.
", + "UpdateDashboardPermissionsResponse$DashboardId": "The ID for the dashboard.
", + "UpdateDashboardPublishedVersionRequest$DashboardId": "The ID for the dashboard.
", + "UpdateDashboardPublishedVersionResponse$DashboardId": "The ID for the dashboard.
", + "UpdateDashboardRequest$DashboardId": "The ID for the dashboard.
", + "UpdateDashboardResponse$DashboardId": "The ID for the dashboard.
", + "UpdateTemplateAliasRequest$TemplateId": "The ID for the template.
", + "UpdateTemplatePermissionsRequest$TemplateId": "The ID for the template.
", + "UpdateTemplatePermissionsResponse$TemplateId": "The ID for the template.
", + "UpdateTemplateRequest$TemplateId": "The ID for the template.
", + "UpdateTemplateResponse$TemplateId": "The ID for the template.
" + } + }, + "RoleSessionName": { + "base": null, + "refs": { + "RegisterUserRequest$SessionName": "You need to use this parameter only when you register one or more users using an assumed IAM role. You don't need to provide the session name for other scenarios, for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role
in the AWS CLI Reference.
Information on rows during a data set SPICE ingestion.
", + "refs": { + "Ingestion$RowInfo": null + } + }, + "RowLevelPermissionDataSet": { + "base": "Row-level security configuration on the dataset.
", + "refs": { + "CreateDataSetRequest$RowLevelPermissionDataSet": "Row-level security configuration on the data you want to create.
", + "DataSet$RowLevelPermissionDataSet": "Row-level security configuration on the dataset.
", + "DataSetSummary$RowLevelPermissionDataSet": "Row-level security configuration on the dataset.
", + "UpdateDataSetRequest$RowLevelPermissionDataSet": "Row-level security configuration on the data you want to create.
" + } + }, + "RowLevelPermissionPolicy": { + "base": null, + "refs": { + "RowLevelPermissionDataSet$PermissionPolicy": "Permission policy.
" + } + }, + "S3Bucket": { + "base": null, + "refs": { + "ManifestFileLocation$Bucket": "Amazon S3 bucket.
" + } + }, + "S3Key": { + "base": null, + "refs": { + "ManifestFileLocation$Key": "Amazon S3 key that identifies an object.
" + } + }, + "S3Parameters": { + "base": "S3 parameters.
", + "refs": { + "DataSourceParameters$S3Parameters": "S3 parameters.
", + } + }, + "S3Source": { + "base": "A physical table type for an S3 data source.
", + "refs": { + "PhysicalTable$S3Source": "A physical table type for an S3 data source.
" + } + }, + "ServiceNowParameters": { + "base": "ServiceNow parameters.
", + "refs": { + "DataSourceParameters$ServiceNowParameters": "ServiceNow parameters.
" + } + }, + "SessionLifetimeInMinutes": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$SessionLifetimeInMinutes": "How many minutes the session is valid. The session lifetime must be between 15 and 600 minutes.
" + } + }, + "SessionLifetimeInMinutesInvalidException": { + "base": "The number of minutes specified for the lifetime of a session is not valid. The session lifetime must be from 15 to 600 minutes.
", + "refs": { + } + }, + "SheetControlsOption": { + "base": "Sheet controls option.
", + "refs": { + "DashboardPublishOptions$SheetControlsOption": "Sheet controls option.
" + } + }, + "SiteBaseUrl": { + "base": null, + "refs": { + "JiraParameters$SiteBaseUrl": "The base URL of the Jira site.
", + "ServiceNowParameters$SiteBaseUrl": "URL of the base site.
" + } + }, + "SnowflakeParameters": { + "base": "Snowflake parameters.
", + "refs": { + "DataSourceParameters$SnowflakeParameters": "Snowflake parameters.
" + } + }, + "SparkParameters": { + "base": "Spark parameters.
", + "refs": { + "DataSourceParameters$SparkParameters": "Spark parameters.
" + } + }, + "SqlQuery": { + "base": null, + "refs": { + "CustomSql$SqlQuery": "The SQL query.
" + } + }, + "SqlServerParameters": { + "base": "SQL Server parameters.
", + "refs": { + "DataSourceParameters$SqlServerParameters": "SQL Server parameters.
" + } + }, + "SslProperties": { + "base": "SSL properties that apply when QuickSight connects to your underlying data source.
", + "refs": { + "CreateDataSourceRequest$SslProperties": "SSL properties that apply when QuickSight connects to your underlying source.
", + "DataSource$SslProperties": "SSL properties that apply when QuickSight connects to your underlying source.
", + "UpdateDataSourceRequest$SslProperties": "SSL properties that apply when QuickSight connects to your underlying source.
" + } + }, + "StatusCode": { + "base": null, + "refs": { + "CancelIngestionResponse$Status": "The http status of the request.
", + "CreateDashboardResponse$Status": "The http status of the request.
", + "CreateDataSetResponse$Status": "The http status of the request.
", + "CreateDataSourceResponse$Status": "The http status of the request.
", + "CreateGroupMembershipResponse$Status": "The http status of the request.
", + "CreateGroupResponse$Status": "The http status of the request.
", + "CreateIAMPolicyAssignmentResponse$Status": "The http status of the request.
", + "CreateIngestionResponse$Status": "The http status of the request.
", + "CreateTemplateAliasResponse$Status": "The http status of the request.
", + "CreateTemplateResponse$Status": "The http status of the request.
", + "DeleteDashboardResponse$Status": "The http status of the request.
", + "DeleteDataSetResponse$Status": "The http status of the request.
", + "DeleteDataSourceResponse$Status": "The http status of the request.
", + "DeleteGroupMembershipResponse$Status": "The http status of the request.
", + "DeleteGroupResponse$Status": "The http status of the request.
", + "DeleteIAMPolicyAssignmentResponse$Status": "The http status of the request.
", + "DeleteTemplateAliasResponse$Status": "The http status of the request.
", + "DeleteTemplateResponse$Status": "The http status of the request.
", + "DeleteUserByPrincipalIdResponse$Status": "The http status of the request.
", + "DeleteUserResponse$Status": "The http status of the request.
", + "DescribeDashboardPermissionsResponse$Status": "The http status of the request.
", + "DescribeDashboardResponse$Status": "The http status of this request.
", + "DescribeDataSetPermissionsResponse$Status": "The http status of the request.
", + "DescribeDataSetResponse$Status": "The http status of the request.
", + "DescribeDataSourcePermissionsResponse$Status": "The http status of the request.
", + "DescribeDataSourceResponse$Status": "The http status of the request.
", + "DescribeGroupResponse$Status": "The http status of the request.
", + "DescribeIAMPolicyAssignmentResponse$Status": "The http status of the request.
", + "DescribeIngestionResponse$Status": "The http status of the request.
", + "DescribeTemplateAliasResponse$Status": "The http status of the request.
", + "DescribeTemplatePermissionsResponse$Status": "The http status of the request.
", + "DescribeTemplateResponse$Status": "The http status of the request.
", + "DescribeUserResponse$Status": "The http status of the request.
", + "GetDashboardEmbedUrlResponse$Status": "The http status of the request.
", + "ListDashboardVersionsResponse$Status": "The http status of the request.
", + "ListDashboardsResponse$Status": "The http status of the request.
", + "ListDataSetsResponse$Status": "The http status of the request.
", + "ListDataSourcesResponse$Status": "The http status of the request.
", + "ListGroupMembershipsResponse$Status": "The http status of the request.
", + "ListGroupsResponse$Status": "The http status of the request.
", + "ListIAMPolicyAssignmentsForUserResponse$Status": "The http status of the request.
", + "ListIAMPolicyAssignmentsResponse$Status": "The http status of the request.
", + "ListIngestionsResponse$Status": "The http status of the request.
", + "ListTagsForResourceResponse$Status": "The http status of the request.
", + "ListTemplateAliasesResponse$Status": "The http status of the request.
", + "ListTemplateVersionsResponse$Status": "The http status of the request.
", + "ListTemplatesResponse$Status": "The http status of the request.
", + "ListUserGroupsResponse$Status": "The HTTP status of the request.
", + "ListUsersResponse$Status": "The http status of the request.
", + "RegisterUserResponse$Status": "The http status of the request.
", + "TagResourceResponse$Status": "The http status of the request.
", + "UntagResourceResponse$Status": "The http status of the request.
", + "UpdateDashboardPermissionsResponse$Status": "The http status of the request.
", + "UpdateDashboardPublishedVersionResponse$Status": "The http status of the request.
", + "UpdateDashboardResponse$Status": "The http status of the request.
", + "UpdateDataSetPermissionsResponse$Status": "The http status of the request.
", + "UpdateDataSetResponse$Status": "The http status of the request.
", + "UpdateDataSourcePermissionsResponse$Status": "The http status of the request.
", + "UpdateDataSourceResponse$Status": "The http status of the request.
", + "UpdateGroupResponse$Status": "The http status of the request.
", + "UpdateIAMPolicyAssignmentResponse$Status": "The http status of the request.
", + "UpdateTemplateAliasResponse$Status": "The http status of the request.
", + "UpdateTemplatePermissionsResponse$Status": "The http status of the request.
", + "UpdateTemplateResponse$Status": "The http status of the request.
", + "UpdateUserResponse$Status": "The http status of the request.
" } }, "String": { @@ -409,20 +2196,55 @@ "refs": { "AccessDeniedException$Message": null, "AccessDeniedException$RequestId": "The AWS request id for this request.
", + "ActionList$member": null, + "ColumnGroupColumnSchema$Name": "The name of the column group's column schema.
", + "ColumnGroupSchema$Name": "The name of the column group schema.
", + "ColumnSchema$Name": "The name of the column schema.
", + "ColumnSchema$DataType": "The data type of the column schema.
", + "ColumnSchema$GeographicRole": "The geographic role of the column schema.
", + "ConcurrentUpdatingException$Message": null, + "ConcurrentUpdatingException$RequestId": null, + "ConflictException$Message": null, + "ConflictException$RequestId": "The AWS request id for this request.
", + "CreateDashboardResponse$RequestId": "The AWS request ID for this operation.
", + "CreateDataSetResponse$RequestId": "The AWS request ID for this operation.
", + "CreateDataSourceResponse$RequestId": "The AWS request ID for this operation.
", "CreateGroupMembershipResponse$RequestId": "The AWS request ID for this operation.
", "CreateGroupResponse$RequestId": "The AWS request ID for this operation.
", + "CreateIAMPolicyAssignmentResponse$AssignmentId": "An ID for the assignment.
", + "CreateIAMPolicyAssignmentResponse$RequestId": "The AWS request ID for this operation.
", + "CreateTemplateAliasResponse$RequestId": "The AWS request ID for this operation.
", + "CreateTemplateResponse$RequestId": "The AWS request ID for this operation.
", + "DataSetConfiguration$Placeholder": "Placeholder.
", + "DataSourceErrorInfo$Message": "Error message.
", + "DeleteDashboardResponse$RequestId": "The AWS request ID for this operation.
", + "DeleteDataSetResponse$RequestId": "The AWS request ID for this operation.
", + "DeleteDataSourceResponse$RequestId": "The AWS request ID for this operation.
", "DeleteGroupMembershipResponse$RequestId": "The AWS request ID for this operation.
", "DeleteGroupResponse$RequestId": "The AWS request ID for this operation.
", + "DeleteIAMPolicyAssignmentResponse$RequestId": "The AWS request ID for this operation.
", + "DeleteTemplateAliasResponse$RequestId": "The AWS request ID for this operation.
", + "DeleteTemplateResponse$RequestId": "The AWS request ID for this operation.
", "DeleteUserByPrincipalIdRequest$PrincipalId": "The principal ID of the user.
", "DeleteUserByPrincipalIdResponse$RequestId": "The AWS request ID for this operation.
", "DeleteUserResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeDashboardPermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeDashboardResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeDataSetPermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeDataSetResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeDataSourcePermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeDataSourceResponse$RequestId": "The AWS request ID for this operation.
", "DescribeGroupResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeIAMPolicyAssignmentResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeTemplateAliasResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeTemplatePermissionsResponse$RequestId": "The AWS request ID for this operation.
", "DescribeUserResponse$RequestId": "The AWS request ID for this operation.
", "DomainNotWhitelistedException$Message": null, "DomainNotWhitelistedException$RequestId": "The AWS request ID for this request.
", - "GetDashboardEmbedUrlRequest$DashboardId": "The ID for the dashboard, also added to IAM policy
", "GetDashboardEmbedUrlResponse$RequestId": "The AWS request ID for this operation.
", "Group$PrincipalId": "The principal ID of the group.
", + "IAMPolicyAssignment$AssignmentId": "Assignment ID.
", + "IdentityMap$key": null, "IdentityTypeNotSupportedException$Message": null, "IdentityTypeNotSupportedException$RequestId": "The AWS request ID for this request.
", "InternalFailureException$Message": null, @@ -433,12 +2255,40 @@ "InvalidParameterValueException$RequestId": "The AWS request ID for this request.
", "LimitExceededException$Message": null, "LimitExceededException$RequestId": "The AWS request ID for this request.
", + "ListDashboardVersionsRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDashboardVersionsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDashboardVersionsResponse$RequestId": "The AWS request ID for this operation.
", + "ListDashboardsRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDashboardsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDashboardsResponse$RequestId": "The AWS request ID for this operation.
", + "ListDataSetsRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDataSetsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDataSetsResponse$RequestId": "The AWS request ID for this operation.
", + "ListDataSourcesRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDataSourcesResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListDataSourcesResponse$RequestId": "The AWS request ID for this operation.
", "ListGroupMembershipsRequest$NextToken": "A pagination token that can be used in a subsequent request.
", "ListGroupMembershipsResponse$NextToken": "A pagination token that can be used in a subsequent request.
", "ListGroupMembershipsResponse$RequestId": "The AWS request ID for this operation.
", "ListGroupsRequest$NextToken": "A pagination token that can be used in a subsequent request.
", "ListGroupsResponse$NextToken": "A pagination token that can be used in a subsequent request.
", "ListGroupsResponse$RequestId": "The AWS request ID for this operation.
", + "ListIAMPolicyAssignmentsForUserRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListIAMPolicyAssignmentsForUserResponse$RequestId": "The AWS request ID for this operation.
", + "ListIAMPolicyAssignmentsForUserResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListIAMPolicyAssignmentsRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListIAMPolicyAssignmentsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListIAMPolicyAssignmentsResponse$RequestId": "The AWS request ID for this operation.
", + "ListTagsForResourceResponse$RequestId": "The AWS request ID for this operation.
", + "ListTemplateAliasesRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListTemplateAliasesResponse$RequestId": "The AWS request ID for this operation.
", + "ListTemplateAliasesResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListTemplateVersionsRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListTemplateVersionsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListTemplateVersionsResponse$RequestId": "The AWS request ID for this operation.
", + "ListTemplatesRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListTemplatesResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListTemplatesResponse$RequestId": "The AWS request ID for this operation.
", "ListUserGroupsRequest$NextToken": "A pagination token that can be used in a subsequent request.
", "ListUserGroupsResponse$NextToken": "A pagination token that can be used in a subsequent request.
", "ListUserGroupsResponse$RequestId": "The AWS request ID for this operation.
", @@ -447,6 +2297,7 @@ "ListUsersResponse$RequestId": "The AWS request ID for this operation.
", "PreconditionNotMetException$Message": null, "PreconditionNotMetException$RequestId": "The AWS request ID for this request.
", + "ProjectedColumnList$member": null, "QuickSightUserNotFoundException$Message": null, "QuickSightUserNotFoundException$RequestId": "The AWS request ID for this request.
", "RegisterUserRequest$Email": "The email address of the user that you want to register.
", @@ -461,27 +2312,357 @@ "ResourceUnavailableException$RequestId": "The AWS request ID for this request.
", "SessionLifetimeInMinutesInvalidException$Message": null, "SessionLifetimeInMinutesInvalidException$RequestId": "The AWS request ID for this request.
", + "StringList$member": null, + "TagResourceResponse$RequestId": "The AWS request ID for this operation.
", "ThrottlingException$Message": null, "ThrottlingException$RequestId": "The AWS request ID for this request.
", "UnsupportedUserEditionException$Message": null, "UnsupportedUserEditionException$RequestId": "The AWS request ID for this request.
", + "UntagResourceResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDashboardPermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDashboardPublishedVersionResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDashboardResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDataSetPermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDataSetResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDataSourcePermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateDataSourceResponse$RequestId": "The AWS request ID for this operation.
", "UpdateGroupResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateIAMPolicyAssignmentResponse$AssignmentId": "The ID of the assignment.
", + "UpdateIAMPolicyAssignmentResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateTemplateAliasResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateTemplatePermissionsResponse$RequestId": "The AWS request ID for this operation.
", + "UpdateTemplateResponse$RequestId": "The AWS request ID for this operation.
", "UpdateUserRequest$Email": "The email address of the user that you want to update.
", "UpdateUserResponse$RequestId": "The AWS request ID for this operation.
", "User$Email": "The user's email address.
", "User$PrincipalId": "The principal ID of the user.
" } }, + "StringList": { + "base": null, + "refs": { + "StringParameter$Values": "Values.
" + } + }, + "StringParameter": { + "base": "String parameter.
", + "refs": { + "StringParameterList$member": null + } + }, + "StringParameterList": { + "base": null, + "refs": { + "Parameters$StringParameters": "String parameters.
" + } + }, + "Tag": { + "base": "The keys of the key-value pairs for the resource tag or tags assigned to the resource.
", + "refs": { + "TagList$member": null + } + }, + "TagColumnOperation": { + "base": "A transform operation that tags a column with additional information.
", + "refs": { + "TransformOperation$TagColumnOperation": "An operation that tags a column with additional information.
" + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "Tag key.
", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "The keys of the key-value pairs for the resource tag or tags assigned to the resource.
" + } + }, + "TagList": { + "base": null, + "refs": { + "CreateDashboardRequest$Tags": "Contains a map of the key-value pairs for the resource tag or tags assigned to the dashboard.
", + "CreateDataSetRequest$Tags": "Contains a map of the key-value pairs for the resource tag or tags assigned to the dataset.
", + "CreateDataSourceRequest$Tags": "Contains a map of the key-value pairs for the resource tag or tags assigned to the data source.
", + "CreateTemplateRequest$Tags": "Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.
", + "ListTagsForResourceResponse$Tags": "Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.
", + "TagResourceRequest$Tags": "Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.
" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "Tag value.
" + } + }, + "Template": { + "base": "A template object. A template is an entity in QuickSight which encapsulates the metadata required to create an analysis that can be used to create dashboard. It adds a layer of abstraction by replacing the dataset associated with the analysis with placeholders. Templates can be used to create dashboards by replacing dataset placeholders with datasets which follow the same schema that was used to create the source analysis and template.
You can share templates across AWS accounts by allowing users in other AWS accounts to create a template or a dashboard from an existing template.
", + "refs": { + "DescribeTemplateResponse$Template": "The template structure of the object you want to describe.
" + } + }, + "TemplateAlias": { + "base": "The template alias.
", + "refs": { + "CreateTemplateAliasResponse$TemplateAlias": "Information on the template alias.
", + "DescribeTemplateAliasResponse$TemplateAlias": "Information about the template alias.
", + "TemplateAliasList$member": null, + "UpdateTemplateAliasResponse$TemplateAlias": "The template alias.
" + } + }, + "TemplateAliasList": { + "base": null, + "refs": { + "ListTemplateAliasesResponse$TemplateAliasList": "A structure containing the list of template aliases.
" + } + }, + "TemplateError": { + "base": "List of errors that occurred when the template version creation failed.
", + "refs": { + "TemplateErrorList$member": null + } + }, + "TemplateErrorList": { + "base": null, + "refs": { + "TemplateVersion$Errors": "Errors associated with the template.
" + } + }, + "TemplateErrorType": { + "base": null, + "refs": { + "TemplateError$Type": "Type of error.
" + } + }, + "TemplateName": { + "base": null, + "refs": { + "CreateTemplateRequest$Name": "A display name for the template.
", + "Template$Name": "The display name of the template.
", + "TemplateSummary$Name": "A display name for the template.
", + "UpdateTemplateRequest$Name": "The name for the template.
" + } + }, + "TemplateSourceAnalysis": { + "base": "The source analysis of the template.
", + "refs": { + "TemplateSourceEntity$SourceAnalysis": "The source analysis, if it is based on an analysis.
" + } + }, + "TemplateSourceEntity": { + "base": "The source entity of the template.
", + "refs": { + "CreateTemplateRequest$SourceEntity": "The ARN of the source entity from which this template is being created. Templates can be currently created from an analysis or another template. If the ARN is for an analysis, you must include its dataset references.
", + "UpdateTemplateRequest$SourceEntity": "The source QuickSight entity from which this template is being created. Templates can be currently created from an Analysis or another template.
" + } + }, + "TemplateSourceTemplate": { + "base": "The source template of the template.
", + "refs": { + "TemplateSourceEntity$SourceTemplate": "The source template, if it is based on an template.
" + } + }, + "TemplateSummary": { + "base": "The template summary.
", + "refs": { + "TemplateSummaryList$member": null + } + }, + "TemplateSummaryList": { + "base": null, + "refs": { + "ListTemplatesResponse$TemplateSummaryList": "A structure containing information about the templates in the list.
" + } + }, + "TemplateVersion": { + "base": "A version of a template.
", + "refs": { + "Template$Version": "A structure describing the versions of the template.
" + } + }, + "TemplateVersionSummary": { + "base": "The template version.
", + "refs": { + "TemplateVersionSummaryList$member": null + } + }, + "TemplateVersionSummaryList": { + "base": null, + "refs": { + "ListTemplateVersionsResponse$TemplateVersionSummaryList": "A structure containing a list of all the versions of the specified template.
" + } + }, + "TeradataParameters": { + "base": "Teradata parameters.
", + "refs": { + "DataSourceParameters$TeradataParameters": "Teradata parameters.
" + } + }, + "TextQualifier": { + "base": null, + "refs": { + "UploadSettings$TextQualifier": "Text qualifier.
" + } + }, "ThrottlingException": { "base": "Access is throttled.
", "refs": { } }, + "Timestamp": { + "base": null, + "refs": { + "Dashboard$CreatedTime": "The time this was created.
", + "Dashboard$LastPublishedTime": "The last time this was published.
", + "Dashboard$LastUpdatedTime": "The last time this was updated.
", + "DashboardSummary$CreatedTime": "The time this was created.
", + "DashboardSummary$LastUpdatedTime": "The last time this was updated.
", + "DashboardSummary$LastPublishedTime": "The last time this was published.
", + "DashboardVersion$CreatedTime": "The time this was created.
", + "DashboardVersionSummary$CreatedTime": "The time this was created.
", + "DataSet$CreatedTime": "The time this was created.
", + "DataSet$LastUpdatedTime": "The last time this was updated.
", + "DataSetSummary$CreatedTime": "The time this was created.
", + "DataSetSummary$LastUpdatedTime": "The last time this was updated.
", + "DataSource$CreatedTime": "The time this was created.
", + "DataSource$LastUpdatedTime": "The last time this was updated.
", + "Template$LastUpdatedTime": "Time when this was last updated.
", + "Template$CreatedTime": "Time when this was created.
", + "TemplateSummary$CreatedTime": "The last time this was created.
", + "TemplateSummary$LastUpdatedTime": "The last time this was updated.
", + "TemplateVersion$CreatedTime": "The time this was created.
", + "TemplateVersionSummary$CreatedTime": "The time this was created.
", + "TimestampList$member": null + } + }, + "TimestampList": { + "base": null, + "refs": { + "DateTimeParameter$Values": "Values.
" + } + }, + "TransformOperation": { + "base": "A data transformation on a logical table. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.
", + "refs": { + "TransformOperationList$member": null + } + }, + "TransformOperationList": { + "base": null, + "refs": { + "LogicalTable$DataTransforms": "Transform operations that act on this logical table.
" + } + }, + "TwitterParameters": { + "base": "Twitter parameters.
", + "refs": { + "DataSourceParameters$TwitterParameters": "Twitter parameters.
" + } + }, + "TypeCastFormat": { + "base": null, + "refs": { + "CastColumnTypeOperation$Format": "When casting a column from string to datetime type, you can supply a QuickSight supported format string to denote the source data format.
" + } + }, "UnsupportedUserEditionException": { "base": "This error indicates that you are calling an operation on an Amazon QuickSight subscription where the edition doesn't include support for that operation. Amazon QuickSight currently has Standard Edition and Enterprise Edition. Not every operation and capability is available in every edition.
", "refs": { } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPublishedVersionRequest": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPublishedVersionResponse": { + "base": null, + "refs": { + } + }, + "UpdateDashboardRequest": { + "base": null, + "refs": { + } + }, + "UpdateDashboardResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSetPermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSetPermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSourcePermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSourcePermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSourceRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSourceResponse": { + "base": null, + "refs": { + } + }, "UpdateGroupRequest": { "base": null, "refs": { @@ -492,6 +2673,55 @@ "refs": { } }, + "UpdateIAMPolicyAssignmentRequest": { + "base": null, + "refs": { + } + }, + "UpdateIAMPolicyAssignmentResponse": { + "base": null, + "refs": { + } + }, + "UpdateResourcePermissionList": { + "base": null, + "refs": { + "UpdateDashboardPermissionsRequest$GrantPermissions": "The permissions that you want to grant on this resource.
", + "UpdateDashboardPermissionsRequest$RevokePermissions": "The permissions that you want to revoke from this resource.
", + "UpdateTemplatePermissionsRequest$GrantPermissions": "A list of resource permissions to be granted on the template. The following example shows the shorthand syntax:
Shorthand Syntax: Principal=string,Actions=string,string ...
A list of resource permissions to be revoked from the template. Shorthand syntax: Shorthand Syntax: Principal=string,Actions=string,string ...
Information on source file(s) format.
", + "refs": { + "S3Source$UploadSettings": "Information on the S3 source file(s) format.
" + } + }, "User": { "base": "A registered user of Amazon QuickSight. Currently, an Amazon QuickSight subscription can't contain more than 20 million users.
", "refs": { @@ -522,6 +2758,7 @@ "refs": { "DeleteUserRequest$UserName": "The name of the user that you want to delete.
", "DescribeUserRequest$UserName": "The name of the user that you want to describe.
", + "ListIAMPolicyAssignmentsForUserRequest$UserName": "The name of the user.
", "ListUserGroupsRequest$UserName": "The Amazon QuickSight user name that you want to list group memberships for.
", "RegisterUserRequest$UserName": "The Amazon QuickSight user name that you want to create for the user you are registering.
", "UpdateUserRequest$UserName": "The Amazon QuickSight user name that you want to update.
", @@ -531,9 +2768,67 @@ "UserRole": { "base": null, "refs": { - "RegisterUserRequest$UserRole": "The Amazon QuickSight role of the user. The user role can be one of the following:
READER
: A user who has read-only access to dashboards.
AUTHOR
: A user who can create data sources, data sets, analyses, and dashboards.
ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
The Amazon QuickSight role of the user. The user role can be one of the following:
READER
: A user who has read-only access to dashboards.
AUTHOR
: A user who can create data sources, data sets, analyses, and dashboards.
ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
The Amazon QuickSight role for the user.
" + "RegisterUserRequest$UserRole": "The Amazon QuickSight role for the user. The user role can be one of the following:
READER
: A user who has read-only access to dashboards.
AUTHOR
: A user who can create data sources, datasets, analyses, and dashboards.
ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
RESTRICTED_READER
: This role isn't currently available for use.
RESTRICTED_AUTHOR
: This role isn't currently available for use.
The Amazon QuickSight role of the user. The user role can be one of the following:
READER
: A user who has read-only access to dashboards.
AUTHOR
: A user who can create data sources, datasets, analyses, and dashboards.
ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
The Amazon QuickSight role for the user. The user role can be one of the following:.
READER
: A user who has read-only access to dashboards.
AUTHOR
: A user who can create data sources, datasets, analyses, and dashboards.
ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
RESTRICTED_READER
: This role isn't currently available for use.
RESTRICTED_AUTHOR
: This role isn't currently available for use.
Username.
" + } + }, + "VersionDescription": { + "base": null, + "refs": { + "CreateDashboardRequest$VersionDescription": "A description for the first version of the dashboard being created.
", + "CreateTemplateRequest$VersionDescription": "A description of the current template version being created. This API created the first version of the template. Every time UpdateTemplate is called a new version is created. Each version of the template maintains a description of the version in the VersionDescription field.
", + "DashboardVersion$Description": "Description.
", + "DashboardVersionSummary$Description": "Description.
", + "TemplateVersion$Description": "The description of the template.
", + "TemplateVersionSummary$Description": "The desription of the template version.
", + "UpdateDashboardRequest$VersionDescription": "A description for the first version of the dashboard being created.
", + "UpdateTemplateRequest$VersionDescription": "A description of the current template version being created. This API created the first version of the template. Every time UpdateTemplate is called a new version is created. Each version of the template maintains a description of the version in the VersionDescription field.
" + } + }, + "VersionNumber": { + "base": null, + "refs": { + "CreateTemplateAliasRequest$TemplateVersionNumber": "The version number of the template.
", + "DashboardSummary$PublishedVersionNumber": "Published version number.
", + "DashboardVersion$VersionNumber": "Version number.
", + "DashboardVersionSummary$VersionNumber": "Version number.
", + "DeleteDashboardRequest$VersionNumber": "The version number of the dashboard. If version number property is provided, only the specified version of the dashboard is deleted.
", + "DeleteTemplateRequest$VersionNumber": "The version number
", + "DescribeDashboardRequest$VersionNumber": "The version number for the dashboard. If version number isn’t passed the latest published dashboard version is described.
", + "DescribeTemplateRequest$VersionNumber": "This is an optional field, when a version number is provided the corresponding version is describe, if it's not provided the latest version of the template is described.
", + "TemplateAlias$TemplateVersionNumber": "The version number of the template alias.
", + "TemplateSummary$LatestVersionNumber": "A structure containing a list of version numbers for the template summary.
", + "TemplateVersion$VersionNumber": "The version number of the template.
", + "TemplateVersionSummary$VersionNumber": "The version number of the template version.
", + "UpdateDashboardPublishedVersionRequest$VersionNumber": "The version number of the dashboard.
", + "UpdateTemplateAliasRequest$TemplateVersionNumber": "The version number of the template.
" + } + }, + "VpcConnectionProperties": { + "base": "VPC connection properties.
", + "refs": { + "CreateDataSourceRequest$VpcConnectionProperties": "You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.
", + "DataSource$VpcConnectionProperties": "The VPC connection information. You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.
", + "UpdateDataSourceRequest$VpcConnectionProperties": "You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.
" + } + }, + "Warehouse": { + "base": null, + "refs": { + "SnowflakeParameters$Warehouse": "Warehouse.
" + } + }, + "WorkGroup": { + "base": null, + "refs": { + "AthenaParameters$WorkGroup": "The workgroup that Athena uses.
" } }, "boolean": { @@ -542,6 +2837,39 @@ "GetDashboardEmbedUrlRequest$UndoRedoDisabled": "Remove the undo/redo button on embedded dashboard. The default is FALSE, which enables the undo/redo button.
", "GetDashboardEmbedUrlRequest$ResetDisabled": "Remove the reset button on embedded dashboard. The default is FALSE, which allows the reset button.
" } + }, + "long": { + "base": null, + "refs": { + "Ingestion$IngestionTimeInSeconds": "The time this ingestion took, measured in seconds.
", + "Ingestion$IngestionSizeInBytes": "Size of the data ingested in bytes.
", + "RowInfo$RowsIngested": "The number of rows that were ingested.
", + "RowInfo$RowsDropped": "The number of rows that were not ingested.
" + } + }, + "string": { + "base": null, + "refs": { + "CancelIngestionRequest$DataSetId": "The ID of the dataset used in the ingestion.
", + "CancelIngestionResponse$RequestId": "The AWS request ID for this operation.
", + "CreateIngestionRequest$DataSetId": "The ID of the dataset used in the ingestion.
", + "CreateIngestionResponse$RequestId": "The AWS request ID for this operation.
", + "DescribeIngestionRequest$DataSetId": "The ID of the dataset used in the ingestion.
", + "DescribeIngestionResponse$RequestId": "The AWS request ID for this operation.
", + "ErrorInfo$Message": "Error essage.
", + "ListIngestionsRequest$DataSetId": "The ID of the dataset used in the ingestion.
", + "ListIngestionsRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListIngestionsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", + "ListIngestionsResponse$RequestId": "The AWS request ID for this operation.
", + "QueueInfo$WaitingOnIngestion": "The ID of the queued ingestion.
", + "QueueInfo$QueuedIngestion": "The ID of the ongoing ingestion. The queued ingestion is waiting for the ongoing ingestion to complete.
" + } + }, + "timestamp": { + "base": null, + "refs": { + "Ingestion$CreatedTime": "The time this ingestion started.
" + } } } } diff --git a/models/apis/quicksight/2018-04-01/paginators-1.json b/models/apis/quicksight/2018-04-01/paginators-1.json index 5677bd8e4a2..31fce46f122 100644 --- a/models/apis/quicksight/2018-04-01/paginators-1.json +++ b/models/apis/quicksight/2018-04-01/paginators-1.json @@ -1,4 +1,44 @@ { "pagination": { + "ListDashboardVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDashboards": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDataSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDataSources": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListIngestions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTemplateAliases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTemplateVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } } } diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 199900dbcb5..2fb87a97ab7 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -25,7 +25,7 @@ "CreateDBSecurityGroup": "Creates a new DB security group. DB security groups control access to a DB instance.
A DB security group controls access to EC2-Classic DB instances that are not in a VPC.
Creates a DBSnapshot. The source DBInstance must be in \"available\" state.
", "CreateDBSubnetGroup": "Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.
", - "CreateEventSubscription": "Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.
If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you are notified of events generated from all RDS sources belonging to your customer account.
", + "CreateEventSubscription": "Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.
If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you are notified of events generated from all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.
Creates an Aurora global database spread across multiple regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This action only applies to Aurora DB clusters.
Creates a new option group. You can create up to 20 option groups.
", "DeleteCustomAvailabilityZone": "Deletes a custom Availability Zone (AZ).
A custom AZ is an on-premises AZ that is integrated with a VMware vSphere cluster.
For more information about RDS on VMware, see the RDS on VMware User Guide.
", @@ -89,7 +89,7 @@ "ModifyDBClusterSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
To share a manual DB cluster snapshot with other AWS accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all
to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all
value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes
API action.
This action only applies to Aurora DB clusters.
Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications
before you call ModifyDBInstance
.
Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName
, ParameterValue
, and ApplyMethod
. A maximum of 20 parameters can be modified in a single request.
Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.
After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.
Updates a manual DB snapshot, which can be encrypted or not encrypted, with a new engine version.
Amazon RDS supports upgrading DB snapshots for MySQL and Oracle.
", + "ModifyDBSnapshot": "Updates a manual DB snapshot, which can be encrypted or not encrypted, with a new engine version.
Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL.
", "ModifyDBSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.
To share a manual DB snapshot with other AWS accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB snapshot. Use the value all
to make the manual DB snapshot public, which means it can be copied or restored by all AWS accounts. Do not add the all
value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts. If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes
API action.
Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.
", "ModifyEventSubscription": "Modifies an existing RDS event notification subscription. Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription
and RemoveSourceIdentifierFromSubscription
calls.
You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.
", @@ -367,7 +367,7 @@ "CreateDBInstanceMessage$CopyTagsToSnapshot": "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
Amazon Aurora
Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.
", "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.
You can enable IAM database authentication for the following database engines:
Amazon Aurora
Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.
MySQL
For MySQL 5.6, minor version 5.6.34 or higher
For MySQL 5.7, minor version 5.7.16 or higher
For MySQL 8.0, minor version 8.0.16 or higher
PostgreSQL
For PostgreSQL 9.5, minor version 9.5.15 or higher
For PostgreSQL 9.6, minor version 9.6.11 or higher
PostgreSQL 10.6, 10.7, and 10.9
For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
", "CreateDBInstanceMessage$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.
", - "CreateDBInstanceMessage$DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see Deleting a DB Instance.
", + "CreateDBInstanceMessage$DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see Deleting a DB Instance.
Amazon Aurora
Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster
. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.
A value that indicates whether the Read Replica is in a Multi-AZ deployment.
You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your Read Replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.
", "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the Read Replica during the maintenance window.
Default: Inherits from the source DB instance
", "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.
", @@ -2058,7 +2058,7 @@ "DescribeReservedDBInstancesMessage$MaxRecords": " The maximum number of records to include in the response. If more than the MaxRecords
value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
", "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": " The maximum number of records to include in the response. If more than the MaxRecords
value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
", "DescribeSourceRegionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
", - "ModifyCurrentDBClusterCapacityMessage$Capacity": "The DB cluster capacity.
When you change the capacity of a paused Aurora Serverless DB cluster, it automatically resumes.
Constraints:
Value must be 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, or 256
.
The DB cluster capacity.
When you change the capacity of a paused Aurora Serverless DB cluster, it automatically resumes.
Constraints:
For Aurora MySQL, valid capacity values are 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, and 256
.
For Aurora PostgreSQL, valid capacity values are 2
, 4
, 8
, 16
, 32
, 64
, 192
, and 384
.
The amount of time, in seconds, that Aurora Serverless tries to find a scaling point to perform seamless scaling before enforcing the timeout action. The default is 300.
Value must be from 10 through 600.
The number of days for which automated backups are retained. You must specify a minimum value of 1.
Default: 1
Constraints:
Must be a value from 1 to 35
The port number on which the DB cluster accepts connections.
Constraints: Value must be 1150-65535
Default: The same port as the original DB cluster.
", @@ -2098,8 +2098,8 @@ "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
", "RestoreDBInstanceToPointInTimeMessage$Port": "The port number on which the database accepts connections.
Constraints: Value must be 1150-65535
Default: The same port as the original DB instance.
", "RestoreDBInstanceToPointInTimeMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
SQL Server
Setting the IOPS value for the SQL Server database engine isn't supported.
", - "ScalingConfiguration$MinCapacity": "The minimum capacity for an Aurora DB cluster in serverless
DB engine mode.
Valid capacity values are 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, and 256
.
The minimum capacity must be less than or equal to the maximum capacity.
", - "ScalingConfiguration$MaxCapacity": "The maximum capacity for an Aurora DB cluster in serverless
DB engine mode.
Valid capacity values are 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, and 256
.
The maximum capacity must be greater than or equal to the minimum capacity.
", + "ScalingConfiguration$MinCapacity": "The minimum capacity for an Aurora DB cluster in serverless
DB engine mode.
For Aurora MySQL, valid capacity values are 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, and 256
.
For Aurora PostgreSQL, valid capacity values are 2
, 4
, 8
, 16
, 32
, 64
, 192
, and 384
.
The minimum capacity must be less than or equal to the maximum capacity.
", + "ScalingConfiguration$MaxCapacity": "The maximum capacity for an Aurora DB cluster in serverless
DB engine mode.
For Aurora MySQL, valid capacity values are 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, and 256
.
For Aurora PostgreSQL, valid capacity values are 2
, 4
, 8
, 16
, 32
, 64
, 192
, and 384
.
The maximum capacity must be greater than or equal to the minimum capacity.
", "ScalingConfiguration$SecondsUntilAutoPause": "The time, in seconds, before an Aurora DB cluster in serverless
mode is paused.
The maximum capacity for the Aurora DB cluster in serverless
DB engine mode.
The maximum capacity for an Aurora DB cluster in serverless
DB engine mode.
The identifier for the DB snapshot to modify the attributes for.
", "ModifyDBSnapshotAttributeMessage$AttributeName": "The name of the DB snapshot attribute to modify.
To manage authorization for other AWS accounts to copy or restore a manual DB snapshot, set this value to restore
.
The identifier of the DB snapshot to modify.
", - "ModifyDBSnapshotMessage$EngineVersion": "The engine version to upgrade the DB snapshot to.
The following are the database engines and engine versions that are available when you upgrade a DB snapshot.
MySQL
5.5.46
(supported for 5.1 DB snapshots)
Oracle
12.1.0.2.v8
(supported for 12.1.0.1 DB snapshots)
11.2.0.4.v12
(supported for 11.2.0.2 DB snapshots)
11.2.0.4.v11
(supported for 11.2.0.3 DB snapshots)
The engine version to upgrade the DB snapshot to.
The following are the database engines and engine versions that are available when you upgrade a DB snapshot.
MySQL
5.5.46
(supported for 5.1 DB snapshots)
Oracle
12.1.0.2.v8
(supported for 12.1.0.1 DB snapshots)
11.2.0.4.v12
(supported for 11.2.0.2 DB snapshots)
11.2.0.4.v11
(supported for 11.2.0.3 DB snapshots)
PostgreSQL
For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS.
", "ModifyDBSnapshotMessage$OptionGroupName": "The option group to identify with the upgraded DB snapshot.
You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see Option Group Considerations in the Amazon RDS User Guide.
", "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.
Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.
Example: mySubnetgroup
The description for the DB subnet group.
", diff --git a/models/apis/runtime.sagemaker/2017-05-13/api-2.json b/models/apis/runtime.sagemaker/2017-05-13/api-2.json index 507801756e5..4214dc72f0e 100644 --- a/models/apis/runtime.sagemaker/2017-05-13/api-2.json +++ b/models/apis/runtime.sagemaker/2017-05-13/api-2.json @@ -37,6 +37,7 @@ "CustomAttributesHeader":{ "type":"string", "max":1024, + "pattern":"\\p{ASCII}*", "sensitive":true }, "EndpointName":{ @@ -46,7 +47,8 @@ }, "Header":{ "type":"string", - "max":1024 + "max":1024, + "pattern":"\\p{ASCII}*" }, "InternalFailure":{ "type":"structure", @@ -85,6 +87,11 @@ "shape":"CustomAttributesHeader", "location":"header", "locationName":"X-Amzn-SageMaker-Custom-Attributes" + }, + "TargetModel":{ + "shape":"TargetModelHeader", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Model" } }, "payload":"Body" @@ -139,6 +146,12 @@ "synthetic":true }, "StatusCode":{"type":"integer"}, + "TargetModelHeader":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"\\A\\S[\\p{Print}]*\\z" + }, "ValidationError":{ "type":"structure", "members":{ diff --git a/models/apis/runtime.sagemaker/2017-05-13/docs-2.json b/models/apis/runtime.sagemaker/2017-05-13/docs-2.json index 3ecd8eed7e0..07398b40bcc 100644 --- a/models/apis/runtime.sagemaker/2017-05-13/docs-2.json +++ b/models/apis/runtime.sagemaker/2017-05-13/docs-2.json @@ -2,27 +2,27 @@ "version": "2.0", "service": "The Amazon SageMaker runtime API.
", "operations": { - "InvokeEndpoint": "After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.
For an overview of Amazon SageMaker, see How It Works.
Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.
Cals to InvokeEndpoint
are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference.
Endpoints are scoped to an individual account, and are not public. The URL does not contain the account ID, but Amazon SageMaker determines the account ID from the authentication token that is supplied by the caller.
After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.
For an overview of Amazon SageMaker, see How It Works.
Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.
Calls to InvokeEndpoint
are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference.
A customer's model containers must respond to requests within 60 seconds. The model itself can have a maximum processing time of 60 seconds before responding to the /invocations. If your model is going to take 50-60 seconds of processing time, the SDK socket timeout should be set to be 70 seconds.
Endpoints are scoped to an individual account, and are not public. The URL does not contain the account ID, but Amazon SageMaker determines the account ID from the authentication token that is supplied by the caller.
Provides input data, in the format specified in the ContentType
request header. Amazon SageMaker passes all of the data in the body to the model.
For information about the format of the request body, see Common Data Formats—Inference.
", - "InvokeEndpointOutput$Body": "Includes the inference provided by the model.
For information about the format of the response body, see Common Data Formats—Inference.
" + "InvokeEndpointInput$Body": "Provides input data, in the format specified in the ContentType
request header. Amazon SageMaker passes all of the data in the body to the model.
For information about the format of the request body, see Common Data Formats—Inference.
", + "InvokeEndpointOutput$Body": "Includes the inference provided by the model.
For information about the format of the response body, see Common Data Formats—Inference.
" } }, "CustomAttributesHeader": { "base": null, "refs": { - "InvokeEndpointInput$CustomAttributes": "", - "InvokeEndpointOutput$CustomAttributes": "" + "InvokeEndpointInput$CustomAttributes": "Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.
", + "InvokeEndpointOutput$CustomAttributes": "Provides additional information in the response about the inference returned by a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to return an ID received in the CustomAttributes
header of a request or other metadata that a service endpoint was programmed to produce. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the custom attribute returned, the model must set the custom attribute to be included on the way back.
This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.
" } }, "EndpointName": { "base": null, "refs": { - "InvokeEndpointInput$EndpointName": "The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.
" + "InvokeEndpointInput$EndpointName": "The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.
" } }, "Header": { @@ -66,7 +66,7 @@ } }, "ModelError": { - "base": "Model (owned by the customer in the container) returned an error 500.
", + "base": "Model (owned by the customer in the container) returned 4xx or 5xx error code.
", "refs": { } }, @@ -81,6 +81,12 @@ "ModelError$OriginalStatusCode": "Original status code.
" } }, + "TargetModelHeader": { + "base": null, + "refs": { + "InvokeEndpointInput$TargetModel": "Specifies the model to be requested for an inference when invoking a multi-model endpoint.
" + } + }, "ValidationError": { "base": "Inspect your request and try again.
", "refs": { diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index 46a665a0992..3699bab6cc3 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -2270,7 +2270,9 @@ "Account":{"shape":"AccountId"}, "StorageClass":{"shape":"StorageClass"}, "AccessControlTranslation":{"shape":"AccessControlTranslation"}, - "EncryptionConfiguration":{"shape":"EncryptionConfiguration"} + "EncryptionConfiguration":{"shape":"EncryptionConfiguration"}, + "ReplicationTime":{"shape":"ReplicationTime"}, + "Metrics":{"shape":"Metrics"} } }, "DisplayName":{"type":"string"}, @@ -2336,8 +2338,14 @@ "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:*", "s3:ObjectRestore:Post", - "s3:ObjectRestore:Completed" + "s3:ObjectRestore:Completed", + "s3:Replication:*", + "s3:Replication:OperationFailedReplication", + "s3:Replication:OperationNotTracked", + "s3:Replication:OperationMissedThreshold", + "s3:Replication:OperationReplicatedAfterThreshold" ] }, "EventList":{ @@ -2345,6 +2353,20 @@ "member":{"shape":"Event"}, "flattened":true }, + "ExistingObjectReplication":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"ExistingObjectReplicationStatus"} + } + }, + "ExistingObjectReplicationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "Expiration":{"type":"string"}, "ExpirationStatus":{ "type":"string", @@ -4271,6 +4293,17 @@ }, "MetadataKey":{"type":"string"}, "MetadataValue":{"type":"string"}, + "Metrics":{ + "type":"structure", + "required":[ + "Status", + "EventThreshold" + ], + "members":{ + "Status":{"shape":"MetricsStatus"}, + "EventThreshold":{"shape":"ReplicationTimeValue"} + } + }, "MetricsAndOperator":{ "type":"structure", "members":{ @@ -4304,6 +4337,14 @@ } }, "MetricsId":{"type":"string"}, + "MetricsStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + 
"Minutes":{"type":"integer"}, "MissingMeta":{"type":"integer"}, "MultipartUpload":{ "type":"structure", @@ -5759,6 +5800,7 @@ "Filter":{"shape":"ReplicationRuleFilter"}, "Status":{"shape":"ReplicationRuleStatus"}, "SourceSelectionCriteria":{"shape":"SourceSelectionCriteria"}, + "ExistingObjectReplication":{"shape":"ExistingObjectReplication"}, "Destination":{"shape":"Destination"}, "DeleteMarkerReplication":{"shape":"DeleteMarkerReplication"} } @@ -5803,6 +5845,30 @@ "REPLICA" ] }, + "ReplicationTime":{ + "type":"structure", + "required":[ + "Status", + "Time" + ], + "members":{ + "Status":{"shape":"ReplicationTimeStatus"}, + "Time":{"shape":"ReplicationTimeValue"} + } + }, + "ReplicationTimeStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ReplicationTimeValue":{ + "type":"structure", + "members":{ + "Minutes":{"shape":"Minutes"} + } + }, "RequestCharged":{ "type":"string", "enum":["requester"] diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index c9408b86ad7..fcfb5e4a754 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -2,108 +2,108 @@ "version": "2.0", "service": "", "operations": { - "AbortMultipartUpload": "Aborts a multipart upload.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.
", - "CompleteMultipartUpload": "Completes a multipart upload by assembling previously uploaded parts.
", - "CopyObject": "Creates a copy of an object that is already stored in Amazon S3.
", - "CreateBucket": "Creates a new bucket.
", - "CreateMultipartUpload": "Initiates a multipart upload and returns an upload ID.
Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
", - "DeleteBucket": "Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.
", - "DeleteBucketAnalyticsConfiguration": "Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others.
", - "DeleteBucketCors": "Deletes the CORS configuration information set for the bucket.
", - "DeleteBucketEncryption": "Deletes the server-side encryption configuration from the bucket.
", - "DeleteBucketInventoryConfiguration": "Deletes an inventory configuration (identified by the inventory ID) from the bucket.
", - "DeleteBucketLifecycle": "Deletes the lifecycle configuration from the bucket.
", - "DeleteBucketMetricsConfiguration": "Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.
", - "DeleteBucketPolicy": "Deletes the policy from the bucket.
", - "DeleteBucketReplication": "Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.
", - "DeleteBucketTagging": "Deletes the tags from the bucket.
", - "DeleteBucketWebsite": "This operation removes the website configuration from the bucket.
", - "DeleteObject": "Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
", - "DeleteObjectTagging": "Removes the tag-set from an existing object.
", - "DeleteObjects": "This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.
", - "DeletePublicAccessBlock": "Removes the PublicAccessBlock
configuration from an Amazon S3 bucket.
Returns the accelerate configuration of a bucket.
", - "GetBucketAcl": "Gets the access control policy for the bucket.
", - "GetBucketAnalyticsConfiguration": "Gets an analytics configuration for the bucket (specified by the analytics configuration ID).
", - "GetBucketCors": "Returns the CORS configuration for the bucket.
", - "GetBucketEncryption": "Returns the server-side encryption configuration of a bucket.
", - "GetBucketInventoryConfiguration": "Returns an inventory configuration (identified by the inventory ID) from the bucket.
", - "GetBucketLifecycle": "No longer used, see the GetBucketLifecycleConfiguration operation.
", - "GetBucketLifecycleConfiguration": "Returns the lifecycle configuration information set on the bucket.
", - "GetBucketLocation": "Returns the region the bucket resides in.
", - "GetBucketLogging": "Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.
", - "GetBucketMetricsConfiguration": "Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.
", - "GetBucketNotification": "No longer used, see the GetBucketNotificationConfiguration operation.
", - "GetBucketNotificationConfiguration": "Returns the notification configuration of a bucket.
", - "GetBucketPolicy": "Returns the policy of a specified bucket.
", - "GetBucketPolicyStatus": "Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public.
", - "GetBucketReplication": "Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
Returns the request payment configuration of a bucket.
", - "GetBucketTagging": "Returns the tag set associated with the bucket.
", - "GetBucketVersioning": "Returns the versioning state of a bucket.
", - "GetBucketWebsite": "Returns the website configuration for a bucket.
", - "GetObject": "Retrieves objects from Amazon S3.
", - "GetObjectAcl": "Returns the access control list (ACL) of an object.
", - "GetObjectLegalHold": "Gets an object's current Legal Hold status.
", - "GetObjectLockConfiguration": "Gets the object lock configuration for a bucket. The rule specified in the object lock configuration will be applied by default to every new object placed in the specified bucket.
", - "GetObjectRetention": "Retrieves an object's retention settings.
", - "GetObjectTagging": "Returns the tag-set of an object.
", - "GetObjectTorrent": "Return torrent files from a bucket.
", - "GetPublicAccessBlock": "Retrieves the PublicAccessBlock
configuration for an Amazon S3 bucket.
This operation is useful to determine if a bucket exists and you have permission to access it.
", - "HeadObject": "The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
", - "ListBucketAnalyticsConfigurations": "Lists the analytics configurations for the bucket.
", - "ListBucketInventoryConfigurations": "Returns a list of inventory configurations for the bucket.
", - "ListBucketMetricsConfigurations": "Lists the metrics configurations for the bucket.
", + "AbortMultipartUpload": "This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure the parts list is empty.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to AbortMultipartUpload
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure the parts list is complete, this operation concatenates the parts you provide in the list. For each part in the list, you must provide the part number and the ETag
value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends whitespace characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, for copying an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For conceptual information, see Copy Object Using the REST Multipart Upload API.
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
Amazon S3 Transfer Acceleration does not support cross-region copies. If you request a cross-region copy using a Transfer Acceleration endpoint, you get a 400 Bad Request
error. For more information about transfer acceleration, see Transfer Acceleration.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the request parameters x-amz-copy-source-if-match
, x-amz-copy-source-if-none-match
, x-amz-copy-source-if-unmodified-since
, or x-amz-copy-source-if-modified-since
.
All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.
You can use this operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes.
The source object that you are copying can be encrypted or unencrypted. If the source object is encrypted, it can be encrypted by server-side encryption using AWS-managed encryption keys or by using a customer-provided encryption key. When copying an object, you can request that Amazon S3 encrypt the target object by using either the AWS-managed encryption keys or by using your own encryption key. You can do this regardless of the form of server-side encryption that was used to encrypt the source, or even if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK
response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
Consider the following when using request headers:
Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:
x-amz-copy-source-if-match condition evaluates to true
x-amz-copy-source-if-unmodified-since condition evaluates to false
Consideration 2 – If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:
x-amz-copy-source-if-none-match condition evaluates to false
x-amz-copy-source-if-modified-since condition evaluates to true
The copy request charge is based on the storage class and Region you specify for the destination object. For pricing information, see Amazon S3 Pricing.
Following are other considerations when using CopyObject
:
By default, x-amz-copy-source
identifies the current version of an object to copy. (If the current version is a delete marker, Amazon S3 behaves as if the object was deleted.) To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, then you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see .
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
To encrypt the target object, you must provide the appropriate encryption-related request headers. The one you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.
To encrypt the target object using server-side encryption with an AWS-managed encryption key, provide the following request headers, as appropriate.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon S3 uses the AWS managed customer master key (CMK) in KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information on Server-Side Encryption with CMKs stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in KMS.
To encrypt the target object using server-side encryption with an encryption key that you provide, use the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
If the source object is encrypted using server-side encryption with customer-provided encryption keys, you must use the following headers.
x-amz-copy-source-server-side-encryption-customer-algorithm
x-amz-copy-source-server-side-encryption-customer-key
x-amz-copy-source-server-side-encryption-customer-key-MD5
For more information on Server-Side Encryption with CMKs stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon KMS.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
emailAddress – if the value specified is the email address of an AWS account
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
The following operations are related to CopyObject
For more information, see Copying Objects.
", + "CreateBucket": "Creates a new bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.
By default, the bucket is created in the US East (N. Virginia) region. You can optionally specify a region in the request body. You might choose a region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the EU (Ireland) region. For more information, see How to Select a Region for Your Buckets.
If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as region, even if the location constraint in the request specifies another region where the bucket is to be created. If you create a bucket in a region other than US East (N. Virginia) region, your application must be able to handle a 307 redirect. For more information, see Virtual Hosting of Buckets.
When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.
Specify a canned ACL using the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly using the x-amz-grant-read
, x-amz-grant-write
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, x-amz-grant-full-control
headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
emailAddress – if the value specified is the email address of an AWS account
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
The following operations are related to CreateBucket
:
This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt
, kms:Decrypt
, kms:ReEncrypt*
, kms:GenerateDataKey*
, and kms:DescribeKey
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.
If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in Amazon Key Management Service (KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information on Server-Side Encryption with CMKs Stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
emailAddress – if the value specified is the email address of an AWS account
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
The following operations are related to CreateMultipartUpload
:
Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.
Related Resources
Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to DeleteBucketAnalyticsConfiguration
:
Deletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS
action. The bucket owner has this permission by default and can grant this permission to others.
For more information about cors
, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
Related Resources:
", + "DeleteBucketEncryption": "This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "DeleteBucketInventoryConfiguration": "Deletes an inventory configuration (identified by the inventory ID) from the bucket.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
Operation related to DeleteBucketInventoryConfiguration
include:
Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration
action. By default, the bucket owner has this permission and the bucket owner can grant this permission to others.
There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 systems.
For more information about the object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", + "DeleteBucketMetricsConfiguration": "Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to DeleteBucketMetricsConfiguration
This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and UserPolicies.
The following operations are related to DeleteBucketPolicy
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 Developer Guide.
The following operations are related to DeleteBucketReplication
Deletes the tags from the bucket.
To use this operation, you must have permission to perform the s3:PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
The following operations are related to DeleteBucketTagging
This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK
response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if the bucket specified in the request does not exist.
This DELETE operation requires the S3:DeleteBucketWebsite
permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite
Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.
You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket you must deny them the s3:DeleteObject, s3:DeleteObjectVersion and s3:PutLifeCycleConfiguration actions.
The following operation is related to DeleteObject
Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.
To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.
To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.
The following operations are related to DeleteObjectTagging
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that, if the object specified in the request is not found, Amazon S3 returns the result as deleted.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.
When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non versioned objects you are attempting to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.
Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
The following operations are related to DeleteObjects
Removes the PublicAccessBlock configuration for an Amazon S3 bucket. In order to use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to DeletePublicAccessBlock
:
This implementation of the GET operation uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state, if a state has never been set on the bucket.
For more information on transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "GetBucketAcl": "This implementation of the GET
operation uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
Related Resources
This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.
Related Resources
Returns the cors configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
To learn more about cors, see Enabling Cross-Origin Resource Sharing.
The following operations are related to GetBucketCors
:
Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.
To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to GetBucketEncryption
:
Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to GetBucketInventoryConfiguration
:
For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter
element, you should see the updated version of this topic. This topic is provided for backward compatibility.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycle
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycle
:
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are still using previous version of the lifecycle configuration, it works. For the earlier API description, see GetBucketLifecycle.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
Returns the region the bucket resides in. You set the bucket's region using the LocationConstraint
request parameter in a CreateBucket
request. For more information, see CreateBucket.
To use this implementation of the operation, you must be the bucket owner.
The following operations are related to GetBucketLocation
:
Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.
The following operations are related to GetBucketLogging
:
Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to GetBucketMetricsConfiguration
:
No longer used, see GetBucketNotificationConfiguration.
", + "GetBucketNotificationConfiguration": "Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration
element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification
permission.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following operation is related to GetBucketNotification
:
Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operation is related to GetBucketPolicy
:
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".
The following operations are related to GetBucketPolicyStatus
:
Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete of a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
For information about replication configuration, see Replication.
This operation requires permissions for the s3:GetReplicationConfiguration
action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter
element in a replication configuration, you must also include the DeleteMarkerReplication
and Priority
elements. The response also returns those elements.
GetBucketReplication
has the following special error:
Error code: NoSuchReplicationConfiguration
Description: There is no replication configuration with that name.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketReplication
:
Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.
The following operations are related to GetBucketRequestPayment
:
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the s3:GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging
has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
The following operations are related to GetBucketTagging
:
Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
This implementation also returns the MFA Delete status of the versioning state, i.e., if the MFA Delete status is enabled
, the bucket owner must use an authentication device to change the versioning state of the bucket.
The following operations are related to GetBucketVersioning
:
Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET operation requires the S3:GetBucketWebsite
permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite
permission.
The following operations are related to GetBucketWebsite
Retrieves objects from Amazon S3. To use GET
, you must have READ
access to the object. If you grant READ
access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg
, you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET
operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the resource as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE storage classes, before you can retrieve the object you must first restore a copy using . Otherwise, this operation returns an InvalidObjectStateError
error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
action), the response also returns the x-amz-tagging-count
header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
Versioning
By default, the GET operation returns the current version of an object. To return a different version, use the versionId
subresource.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type
, Content-Language
, Expires
, Cache-Control
, Content-Disposition
, and Content-Encoding
. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows: If-Match
condition evaluates to true
, and; If-Unmodified-Since
condition evaluates to false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows: If-None-Match
condition evaluates to false
, and; If-Modified-Since
condition evaluates to true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject
:
Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP access to the object.
Versioning
By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.
The following operations are related to GetObjectAcl
:
Gets an object's current Legal Hold status. For more information, see Locking Objects.
", + "GetObjectLockConfiguration": "Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
", + "GetObjectRetention": "Retrieves an object's retention settings. For more information, see Locking Objects.
", + "GetObjectTagging": "Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging
action. By default, the GET operation returns information about current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging
action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following operation is related to GetObjectTagging
:
Return torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Amazon S3 Torrent.
You can get torrent only for objects that are less than 5 GB in size and that are not encrypted using server-side encryption with customer-provided encryption key.
To use GET, you must have READ access to the object.
The following operation is related to GetObjectTorrent
:
Retrieves the PublicAccessBlock
configuration for an Amazon S3 bucket. In order to use this operation, you must have the s3:GetBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to GetPublicAccessBlock
:
This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK
if the bucket exists and you have permission to access it. Otherwise, the operation might return responses such as 404 Not Found
and 403 Forbidden
.
To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows:
If-Match
condition evaluates to true
, and;
If-Unmodified-Since
condition evaluates to false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows:
If-None-Match
condition evaluates to false
, and;
If-Modified-Since
condition evaluates to true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
The following operation is related to HeadObject
:
Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there will be a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations
:
Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
The following operations are related to ListBucketInventoryConfigurations
:
Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token
in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations
:
Returns a list of all buckets owned by the authenticated sender of the request.
", - "ListMultipartUploads": "This operation lists in-progress multipart uploads.
", - "ListObjectVersions": "Returns metadata about all of the versions of objects in a bucket.
", - "ListObjects": "Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
", - "ListObjectsV2": "Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.
", - "ListParts": "Lists the parts that have been uploaded for a specific multipart upload.
", - "PutBucketAccelerateConfiguration": "Sets the accelerate configuration of an existing bucket.
", - "PutBucketAcl": "Sets the permissions on a bucket using access control lists (ACL).
", - "PutBucketAnalyticsConfiguration": "Sets an analytics configuration for the bucket (specified by the analytics configuration ID).
", - "PutBucketCors": "Sets the CORS configuration for a bucket.
", - "PutBucketEncryption": "Creates a new server-side encryption configuration (or replaces an existing one, if present).
", - "PutBucketInventoryConfiguration": "Adds an inventory configuration (identified by the inventory ID) from the bucket.
", - "PutBucketLifecycle": "No longer used, see the PutBucketLifecycleConfiguration operation.
", - "PutBucketLifecycleConfiguration": "Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.
", - "PutBucketLogging": "Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.
", - "PutBucketMetricsConfiguration": "Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.
", - "PutBucketNotification": "No longer used, see the PutBucketNotificationConfiguration operation.
", - "PutBucketNotificationConfiguration": "Enables notifications of specified events for a bucket.
", - "PutBucketPolicy": "Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
", - "PutBucketReplication": "Creates a replication configuration or replaces an existing one. For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.
", - "PutBucketRequestPayment": "Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
", - "PutBucketTagging": "Sets the tags for a bucket.
", - "PutBucketVersioning": "Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.
", - "PutBucketWebsite": "Set the website configuration for a bucket.
", - "PutObject": "Adds an object to a bucket.
", - "PutObjectAcl": "uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket
", - "PutObjectLegalHold": "Applies a Legal Hold configuration to the specified object.
", - "PutObjectLockConfiguration": "Places an object lock configuration on the specified bucket. The rule specified in the object lock configuration will be applied by default to every new object placed in the specified bucket.
", - "PutObjectRetention": "Places an Object Retention configuration on an object.
", - "PutObjectTagging": "Sets the supplied tag-set to an object that already exists in a bucket
", - "PutPublicAccessBlock": "Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket.
Restores an archived copy of an object back into Amazon S3
", - "SelectObjectContent": "This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
", - "UploadPart": "Uploads a part in a multipart upload.
Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
", - "UploadPartCopy": "Uploads a part by copying data from an existing object as data source.
" + "ListMultipartUploads": "This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated
element with the value true. To list the additional multipart uploads, use the key-marker
and upload-id-marker
request parameters.
In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to ListMultipartUploads
:
Returns metadata about all of the versions of objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.
A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.
To use this operation, you must have READ access to the bucket.
The following operations are related to ListObjectVersions
:
Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects
.
The following operations are related to ListObjects
:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.
To use this operation, you must have READ access to the bucket.
To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
To get a list of your buckets, see ListBuckets.
The following operations are related to ListObjectsV2
:
Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true, and a NextPartNumberMarker
element. In subsequent ListParts
requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to ListParts
:
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration
:
Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.
You can use one of the following two ways to set a bucket's permissions:
Specify the ACL in the request body
Specify permissions using request headers
You cannot specify access permission using both the body and the request headers.
Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
emailAddress – if the value specified is the email address of an AWS account
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses.
x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
Related Resources
", + "PutBucketAnalyticsConfiguration": "Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.
You can choose to have storage class analysis export analysis reports to a comma-separated values (CSV) flat file, see the DataExport request element. Reports are updated daily and are based on the object filters you configure. When selecting data export you specify a destination bucket and optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
Special Errors
HTTP Error: HTTP 400 Bad Request
Code: InvalidArgument
Cause: Invalid argument.
HTTP Error: HTTP 400 Bad Request
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Error: HTTP 403 Forbidden
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.
Related Resources
Sets the cors
configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com
to access your Amazon S3 bucket at my.example.bucket.com
by using the browser's XMLHttpRequest
capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
subresource to the bucket. The cors
subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors
configuration on the bucket and uses the first CORSRule
rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin
header must match AllowedOrigin
elements.
The request method (for example, GET, PUT, HEAD and so on) or the Access-Control-Request-Method
header in case of a pre-flight OPTIONS
request must be one of the AllowedMethod
elements.
Every header specified in the Access-Control-Request-Headers
request header of a pre-flight request must match an AllowedHeader
element.
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "PutBucketEncryption": "This implementation of the PUT
operation uses the encryption
subresource to set the default encryption state of an existing bucket.
This implementation of the PUT
operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "PutBucketInventoryConfiguration": "This implementation of the PUT
operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration
bucket permission to set the configuration on the bucket
Related Resources
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
Related Resources
GetBucketLifecycle(Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the AWS account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon Simple Storage Service Developer Guide:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Managing Access Permissions to Your Amazon S3 Resources.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
Rules
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:
Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.
Status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
Permissions
By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
The following are related to PutBucketLifecycleConfiguration
:
Sets the logging parameters for a bucket and specifies permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.
The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:
<BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />
For more information about server access logging, see Server Access Logging.
For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.
The following operations are related to PutBucketLogging
:
Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to PutBucketMetricsConfiguration
:
GetBucketLifecycle
has the following special error:
Error code: TooManyConfigurations
Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Status Code: HTTP 400 Bad Request
No longer used, see the PutBucketNotificationConfiguration operation.
", + "PutBucketNotificationConfiguration": "Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration
.
<NotificationConfiguration>
</NotificationConfiguration>
This operation replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification
permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration
specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to topic.
The following operation is related to PutBucketNotificationConfiguration
:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operations are related to PutBucketPolicy
:
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.
To perform this operation, the user or role performing the operation must have the iam:PassRole permission.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication
, Status
, and Priority
.
For information about enabling versioning on a bucket, see Using Versioning.
By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
Handling Replication of Encrypted Objects
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria
, SseKmsEncryptedObjects
, Status
, EncryptionConfiguration
, and ReplicaKmsKeyID
. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.
PutBucketReplication
has the following special errors:
Error code: InvalidRequest
Description: If the <Owner> in <AccessControlTranslation> has a value, the <Account> element must be specified.
HTTP 400
Error code: InvalidArgument
Description: The <Account> element is empty. It must contain a valid account ID.
HTTP 400
Error code: InvalidArgument
Description: The AWS account specified in the <Account> element must match the destination bucket owner.
HTTP 400
The following operations are related to PutBucketReplication
:
Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.
The following operations are related to PutBucketRequestPayment
:
Sets the tags for a bucket.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging
has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional operation is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging
:
Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request
header and the Status and the MfaDelete
request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
Related Resources
", + "PutBucketWebsite": "Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT operation requires the S3:PutBucketWebsite
permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
To configure your application to send the request headers before sending the request body, use the 100-continue
HTTP status code. For PUT operations, this helps you avoid sending the message body if the message is rejected based on the headers (for example, because authentication fails or a redirect occurs). For more information on the 100-continue
HTTP status code, see Section 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt.
You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS-managed encryption keys. For more information, see Using Server-Side Encryption.
You can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information on Server-Side Encryption with CMKs stored in KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
emailAddress – if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
EU (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported regions and endpoints, see Regions and Endpoints in the AWS General Reference
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the default AWS KMS CMK to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
If you use this feature, the ETag value that Amazon S3 returns in the response is not the MD5 of the object.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Storage Class Options
By default, Amazon S3 uses the Standard storage class to store newly created objects. The Standard storage class provides high durability and high availability. You can specify other storage classes depending on the performance needs. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.
Versioning
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response using the x-amz-version-id response
header. If versioning is suspended, Amazon S3 always uses null as the version ID for the object stored. For more information about returning the versioning state of a bucket, see GetBucketVersioning. If you enable versioning for a bucket, when Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
Related Resources
", + "PutObjectAcl": "uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket. You must have WRITE_ACP permission to set the ACL of an object.
Depending on your application needs, you may choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
emailAddress – if the value specified is the email address of an AWS account
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
Versioning
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId
subresource.
Related Resources
", + "PutObjectLegalHold": "Applies a Legal Hold configuration to the specified object.
Related Resources
", + "PutObjectLockConfiguration": "Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.
DefaultRetention
requires either Days or Years. You can't specify both at the same time.
Related Resources
", + "PutObjectRetention": "Places an Object Retention configuration on an object.
Related Resources
", + "PutObjectTagging": "Sets the supplied tag-set to an object that already exists in a bucket
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
Special Errors
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
Related Resources
", + "PutPublicAccessBlock": "Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. In order to use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
Related Resources
Restores an archived copy of an object back into Amazon S3
This operation performs the following types of requests:
select
- Perform a select query on an archived object
restore an archive
- Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject
and s3:GetObject
actions. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon Simple Storage Service Developer Guide
Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide
Define the SQL expression for the SELECT
type of restoration for your query in the request body's SelectParameters
structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo
in the CSV
structure in the request body to USE
, you can specify headers in the query. (If you set the fileHeaderInfo
field to IGNORE
, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with Glacier Select restore, see SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide.
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited
tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted — manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409
.
Restoring Archives
Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To access an archived object, you must first initiate a restore request. This restores a temporary copy of the archived object. In a restore request, you specify the number of days that you want the restored copy to exist. After the specified period, Amazon S3 deletes the temporary copy but the object remains archived in the GLACIER or DEEP_ARCHIVE storage class that object was restored from.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
The time it takes restore jobs to finish depends on which storage class the object is being restored from and which data access tier you specify.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the GLACIER storage class when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals are typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for the DEEP_ARCHIVE storage class.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not specify the retrieval option. Standard retrievals typically complete within 3-5 hours from the GLACIER storage class and typically complete within 12 hours from the DEEP_ARCHIVE storage class.
Bulk
- Bulk retrievals are Amazon Glacier’s lowest-cost retrieval option, enabling you to retrieve large amounts, even petabytes, of data inexpensively in a day. Bulk retrievals typically complete within 5-12 hours from the GLACIER storage class and typically complete within 48 hours from the DEEP_ARCHIVE storage class.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. You upgrade the speed of an in-progress restoration by issuing another restore request to the same object, setting a new Tier
request element. When issuing a request to upgrade the restore tier, you must choose a tier that is faster than the tier that the in-progress restore is using. You must not change any other parameters, such as the Days
request element. For more information, see Upgrading the Speed of an In-Progress Restore in the Amazon Simple Storage Service Developer Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request — there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.
Responses
A successful operation returns either the 200 OK
or 202 Accepted
status code.
If the object copy is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object copy is previously restored, Amazon S3 returns 200 OK
in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Glacier expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide
This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.
For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide.
Permissions
You must have s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.
Object Data Formats
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.
Working with the Response Body
Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see RESTSelectObjectAppendix.
GetObject Support
The SelectObjectContent
operation does not support the following GetObject
functionality. For more information, see GetObject.
Range
: While you can specify a scan range for an Amazon S3 Select request, see SelectObjectContentRequest$ScanRange in the request parameters below, you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For more information, about storage classes see Storage Classes in the Amazon Simple Storage Service Developer Guide.
Special Errors
For a list of special errors for this operation and for general information about Amazon S3 errors and a list of error codes, see ErrorResponses
Related Resources
", + "UploadPart": "Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
To ensure that data is not corrupted when traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide .
For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.
You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Related Resources
", + "UploadPartCopy": "Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source
in your request and a byte range by adding the request header x-amz-copy-source-range
in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.
Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information on using the UploadPartCopy operation, see the following topics:
For conceptual information on multipart uploads, go to Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service Developer Guide.
For information on permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.
For information about copying objects using a single atomic operation vs. the multipart upload, go to Operations on Objects in the Amazon Simple Storage Service Developer Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers x-amz-copy-source-if-match
, x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
Consideration 1 - If both of the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request as follows:
x-amz-copy-source-if-match
condition evaluates to true
, and;
x-amz-copy-source-if-unmodified-since
condition evaluates to false
;
then, S3 returns 200 OK
and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request as follows:
x-amz-copy-source-if-none-match
condition evaluates to false
, and;
x-amz-copy-source-if-modified-since
condition evaluates to true
;
then, S3 returns 412 Precondition Failed
response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source
identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source
, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source
and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source
.
You can optionally specify a specific version of the source object to copy by adding the versionId
subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
Date when multipart upload will become eligible for abort operation by lifecycle.
", - "ListPartsOutput$AbortDate": "Date when multipart upload will become eligible for abort operation by lifecycle.
" + "CreateMultipartUploadOutput$AbortDate": "If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header. The header indicates when the initiated multipart upload becomes eligible for an abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
The response also includes the x-amz-abort-rule-id header that provides the ID of the lifecycle configuration rule that defines this action.
", + "ListPartsOutput$AbortDate": "If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, then the response includes this header indicating when the initiated multipart upload will become eligible for abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
The response will also include the x-amz-abort-rule-id header that will provide the ID of the lifecycle configuration rule that defines this action.
" } }, "AbortIncompleteMultipartUpload": { "base": "Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy in the Amazon Simple Storage Service Developer Guide.
", "refs": { - "LifecycleRule$AbortIncompleteMultipartUpload": "", - "Rule$AbortIncompleteMultipartUpload": "" + "LifecycleRule$AbortIncompleteMultipartUpload": null, + "Rule$AbortIncompleteMultipartUpload": null } }, "AbortMultipartUploadOutput": { @@ -119,21 +119,21 @@ "AbortRuleId": { "base": null, "refs": { - "CreateMultipartUploadOutput$AbortRuleId": "Id of the lifecycle rule that makes a multipart upload eligible for abort operation.
", - "ListPartsOutput$AbortRuleId": "Id of the lifecycle rule that makes a multipart upload eligible for abort operation.
" + "CreateMultipartUploadOutput$AbortRuleId": "This header is returned along with the x-amz-abort-date header. It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.
", + "ListPartsOutput$AbortRuleId": "This header is returned along with the x-amz-abort-date header. It identifies applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.
" } }, "AccelerateConfiguration": { "base": "Configures the transfer acceleration state for an Amazon S3 bucket. For more information, see Amazon S3 Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.
", "refs": { - "PutBucketAccelerateConfigurationRequest$AccelerateConfiguration": "Specifies the Accelerate Configuration you want to set for the bucket.
" + "PutBucketAccelerateConfigurationRequest$AccelerateConfiguration": "Container for setting the transfer acceleration state.
" } }, "AcceptRanges": { "base": null, "refs": { - "GetObjectOutput$AcceptRanges": "", - "HeadObjectOutput$AcceptRanges": "" + "GetObjectOutput$AcceptRanges": "Indicates that a range of bytes was specified.
", + "HeadObjectOutput$AcceptRanges": "Indicates that a range of bytes was specified.
" } }, "AccessControlPolicy": { @@ -153,7 +153,7 @@ "base": null, "refs": { "AnalyticsS3BucketDestination$BucketAccountId": "The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.
", - "Destination$Account": "Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by specifying the AccessControlTranslation
property, this is the account ID of the destination bucket owner. For more information, see Cross-Region Replication Additional Configuration: Change Replica Owner in the Amazon Simple Storage Service Developer Guide.
Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by specifying the AccessControlTranslation
property, this is the account ID of the destination bucket owner. For more information, see Replication Additional Configuration: Change Replica Owner in the Amazon Simple Storage Service Developer Guide.
The ID of the account that owns the destination bucket.
" } }, @@ -206,7 +206,7 @@ } }, "AnalyticsConfiguration": { - "base": "Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.
For more information, see GET Bucket analytics in the Amazon Simple Storage Service API Reference.
", + "base": "Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.
", "refs": { "AnalyticsConfigurationList$member": null, "GetBucketAnalyticsConfigurationOutput$AnalyticsConfiguration": "The configuration and any analyses for the analytics filter.
", @@ -226,7 +226,7 @@ } }, "AnalyticsFilter": { - "base": "", + "base": "The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.
", "refs": { "AnalyticsConfiguration$Filter": "The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.
" } @@ -241,7 +241,7 @@ } }, "AnalyticsS3BucketDestination": { - "base": "", + "base": "Contains information about where to publish the analytics results.
", "refs": { "AnalyticsExportDestination$S3BucketDestination": "A destination signifying output to an S3 bucket.
" } @@ -256,14 +256,14 @@ "base": null, "refs": { "GetObjectOutput$Body": "Object data.
", - "GetObjectTorrentOutput$Body": "", + "GetObjectTorrentOutput$Body": "A Bencoded dictionary as defined by the BitTorrent specification
", "PutObjectRequest$Body": "Object data.
", "RecordsEvent$Payload": "The byte array of partial, one or more result records.
", "UploadPartRequest$Body": "Object data.
" } }, "Bucket": { - "base": "", + "base": "In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all AWS accounts.
", "refs": { "Buckets$member": null } @@ -281,7 +281,7 @@ } }, "BucketAlreadyOwnedByYou": { - "base": "", + "base": "The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
", "refs": { } }, @@ -295,20 +295,20 @@ "BucketLifecycleConfiguration": { "base": "Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For more information, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.
", "refs": { - "PutBucketLifecycleConfigurationRequest$LifecycleConfiguration": "" + "PutBucketLifecycleConfigurationRequest$LifecycleConfiguration": "Container for lifecycle rules. You can add as many as 1,000 rules.
" } }, "BucketLocationConstraint": { "base": null, "refs": { "CreateBucketConfiguration$LocationConstraint": "Specifies the region where the bucket will be created. If you don't specify a region, the bucket is created in US East (N. Virginia) Region (us-east-1).
", - "GetBucketLocationOutput$LocationConstraint": "" + "GetBucketLocationOutput$LocationConstraint": "Specifies the region where the bucket resides. For a list of all the Amazon S3 supported location constraints by region, see Regions and Endpoints.
" } }, "BucketLoggingStatus": { - "base": "", + "base": "Container for logging status information.
", "refs": { - "PutBucketLoggingRequest$BucketLoggingStatus": "" + "PutBucketLoggingRequest$BucketLoggingStatus": "Container for logging status information.
" } }, "BucketLogsPermission": { @@ -320,103 +320,103 @@ "BucketName": { "base": null, "refs": { - "AbortMultipartUploadRequest$Bucket": "Name of the bucket to which the multipart upload was initiated.
", + "AbortMultipartUploadRequest$Bucket": "The bucket to which the upload was taking place.
", "AnalyticsS3BucketDestination$Bucket": "The Amazon Resource Name (ARN) of the bucket to which data is exported.
", "Bucket$Name": "The name of the bucket.
", - "CompleteMultipartUploadOutput$Bucket": "", - "CompleteMultipartUploadRequest$Bucket": "", - "CopyObjectRequest$Bucket": "", - "CreateBucketRequest$Bucket": "", + "CompleteMultipartUploadOutput$Bucket": "The name of the bucket that contains the newly created object.
", + "CompleteMultipartUploadRequest$Bucket": "Name of the bucket to which the multipart upload was initiated.
", + "CopyObjectRequest$Bucket": "The name of the destination bucket.
", + "CreateBucketRequest$Bucket": "The name of the bucket to create.
", "CreateMultipartUploadOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.
", - "CreateMultipartUploadRequest$Bucket": "", + "CreateMultipartUploadRequest$Bucket": "The name of the bucket to which to initiate the upload
", "DeleteBucketAnalyticsConfigurationRequest$Bucket": "The name of the bucket from which an analytics configuration is deleted.
", - "DeleteBucketCorsRequest$Bucket": "", + "DeleteBucketCorsRequest$Bucket": "Specifies the bucket whose cors
configuration is being deleted.
The name of the bucket containing the server-side encryption configuration to delete.
", "DeleteBucketInventoryConfigurationRequest$Bucket": "The name of the bucket containing the inventory configuration to delete.
", - "DeleteBucketLifecycleRequest$Bucket": "", + "DeleteBucketLifecycleRequest$Bucket": "The bucket name of the lifecycle to delete.
", "DeleteBucketMetricsConfigurationRequest$Bucket": "The name of the bucket containing the metrics configuration to delete.
", - "DeleteBucketPolicyRequest$Bucket": "", - "DeleteBucketReplicationRequest$Bucket": "The bucket name.
It can take a while to propagate the deletion of a replication configuration to all Amazon S3 systems.
The bucket name.
", + "DeleteBucketReplicationRequest$Bucket": "The bucket name.
", + "DeleteBucketRequest$Bucket": "Specifies the bucket being deleted.
", + "DeleteBucketTaggingRequest$Bucket": "The bucket that has the tag set to be removed.
", + "DeleteBucketWebsiteRequest$Bucket": "The bucket name for which you want to remove the website configuration.
", + "DeleteObjectRequest$Bucket": "The bucket name of the bucket containing the object.
", + "DeleteObjectTaggingRequest$Bucket": "The bucket containing the objects from which to remove the tags.
", + "DeleteObjectsRequest$Bucket": "The bucket name containing the objects to delete.
", "DeletePublicAccessBlockRequest$Bucket": "The Amazon S3 bucket whose PublicAccessBlock
configuration you want to delete.
The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.
A replication configuration can replicate objects to only one destination bucket. If there are multiple rules in your replication configuration, all rules must specify the same destination bucket.
", + "Destination$Bucket": "The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store the results.
", "GetBucketAccelerateConfigurationRequest$Bucket": "Name of the bucket for which the accelerate configuration is retrieved.
", - "GetBucketAclRequest$Bucket": "", + "GetBucketAclRequest$Bucket": "Specifies the S3 bucket whose ACL is being requested.
", "GetBucketAnalyticsConfigurationRequest$Bucket": "The name of the bucket from which an analytics configuration is retrieved.
", - "GetBucketCorsRequest$Bucket": "", + "GetBucketCorsRequest$Bucket": "The bucket name for which to get the cors configuration.
", "GetBucketEncryptionRequest$Bucket": "The name of the bucket from which the server-side encryption configuration is retrieved.
", "GetBucketInventoryConfigurationRequest$Bucket": "The name of the bucket containing the inventory configuration to retrieve.
", - "GetBucketLifecycleConfigurationRequest$Bucket": "", - "GetBucketLifecycleRequest$Bucket": "", - "GetBucketLocationRequest$Bucket": "", - "GetBucketLoggingRequest$Bucket": "", + "GetBucketLifecycleConfigurationRequest$Bucket": "The name of the bucket for which to get the lifecycle information.
", + "GetBucketLifecycleRequest$Bucket": "The name of the bucket for which to get the lifecycle information.
", + "GetBucketLocationRequest$Bucket": "The name of the bucket for which to get the location.
", + "GetBucketLoggingRequest$Bucket": "The bucket name for which to get the logging information.
", "GetBucketMetricsConfigurationRequest$Bucket": "The name of the bucket containing the metrics configuration to retrieve.
", - "GetBucketNotificationConfigurationRequest$Bucket": "Name of the bucket to get the notification configuration for.
", - "GetBucketPolicyRequest$Bucket": "", + "GetBucketNotificationConfigurationRequest$Bucket": "Name of the bucket for which to get the notification configuration
", + "GetBucketPolicyRequest$Bucket": "The bucket name for which to get the bucket policy.
", "GetBucketPolicyStatusRequest$Bucket": "The name of the Amazon S3 bucket whose policy status you want to retrieve.
", - "GetBucketReplicationRequest$Bucket": "", - "GetBucketRequestPaymentRequest$Bucket": "", - "GetBucketTaggingRequest$Bucket": "", - "GetBucketVersioningRequest$Bucket": "", - "GetBucketWebsiteRequest$Bucket": "", - "GetObjectAclRequest$Bucket": "", + "GetBucketReplicationRequest$Bucket": "The bucket name for which to get the replication information.
", + "GetBucketRequestPaymentRequest$Bucket": "The name of the bucket for which to get the payment request configuration
", + "GetBucketTaggingRequest$Bucket": "The name of the bucket for which to get the tagging information.
", + "GetBucketVersioningRequest$Bucket": "The name of the bucket for which to get the versioning information.
", + "GetBucketWebsiteRequest$Bucket": "The bucket name for which to get the website configuration.
", + "GetObjectAclRequest$Bucket": "The bucket name of the object for which to get the ACL information.
", "GetObjectLegalHoldRequest$Bucket": "The bucket containing the object whose Legal Hold status you want to retrieve.
", - "GetObjectLockConfigurationRequest$Bucket": "The bucket whose object lock configuration you want to retrieve.
", - "GetObjectRequest$Bucket": "", + "GetObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to retrieve.
", + "GetObjectRequest$Bucket": "The bucket name containing the object.
", "GetObjectRetentionRequest$Bucket": "The bucket containing the object whose retention settings you want to retrieve.
", - "GetObjectTaggingRequest$Bucket": "", - "GetObjectTorrentRequest$Bucket": "", + "GetObjectTaggingRequest$Bucket": "The bucket name containing the object for which to get the tagging information.
", + "GetObjectTorrentRequest$Bucket": "The name of the bucket containing the object for which to get the torrent files.
", "GetPublicAccessBlockRequest$Bucket": "The name of the Amazon S3 bucket whose PublicAccessBlock
configuration you want to retrieve.
The bucket name.
", + "HeadObjectRequest$Bucket": "The name of the bucket containing the object.
", "InventoryS3BucketDestination$Bucket": "The Amazon resource name (ARN) of the bucket where inventory results will be published.
", "ListBucketAnalyticsConfigurationsRequest$Bucket": "The name of the bucket from which analytics configurations are retrieved.
", "ListBucketInventoryConfigurationsRequest$Bucket": "The name of the bucket containing the inventory configurations to retrieve.
", "ListBucketMetricsConfigurationsRequest$Bucket": "The name of the bucket containing the metrics configurations to retrieve.
", "ListMultipartUploadsOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.
", - "ListMultipartUploadsRequest$Bucket": "", - "ListObjectVersionsOutput$Name": "", - "ListObjectVersionsRequest$Bucket": "", - "ListObjectsOutput$Name": "", - "ListObjectsRequest$Bucket": "", - "ListObjectsV2Output$Name": "Name of the bucket to list.
", + "ListMultipartUploadsRequest$Bucket": "Name of the bucket to which the multipart upload was initiated.
", + "ListObjectVersionsOutput$Name": "Bucket owner's name.
", + "ListObjectVersionsRequest$Bucket": "The name of the bucket that contains the objects.
", + "ListObjectsOutput$Name": "Name of the bucket.
", + "ListObjectsRequest$Bucket": "The name of the bucket containing the objects.
", + "ListObjectsV2Output$Name": "Name of the bucket.
", "ListObjectsV2Request$Bucket": "Name of the bucket to list.
", "ListPartsOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.
", - "ListPartsRequest$Bucket": "", + "ListPartsRequest$Bucket": "Name of the bucket to which the parts are being uploaded.
", "PutBucketAccelerateConfigurationRequest$Bucket": "Name of the bucket for which the accelerate configuration is set.
", - "PutBucketAclRequest$Bucket": "", + "PutBucketAclRequest$Bucket": "The bucket to which to apply the ACL.
", "PutBucketAnalyticsConfigurationRequest$Bucket": "The name of the bucket to which an analytics configuration is stored.
", - "PutBucketCorsRequest$Bucket": "", - "PutBucketEncryptionRequest$Bucket": "Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
", + "PutBucketCorsRequest$Bucket": "Specifies the bucket impacted by the cors
configuration.
Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
", "PutBucketInventoryConfigurationRequest$Bucket": "The name of the bucket where the inventory configuration will be stored.
", - "PutBucketLifecycleConfigurationRequest$Bucket": "", + "PutBucketLifecycleConfigurationRequest$Bucket": "The name of the bucket for which to set the configuration.
", "PutBucketLifecycleRequest$Bucket": "", - "PutBucketLoggingRequest$Bucket": "", + "PutBucketLoggingRequest$Bucket": "The name of the bucket for which to set the logging parameters.
", "PutBucketMetricsConfigurationRequest$Bucket": "The name of the bucket for which the metrics configuration is set.
", - "PutBucketNotificationConfigurationRequest$Bucket": "", - "PutBucketNotificationRequest$Bucket": "", - "PutBucketPolicyRequest$Bucket": "", - "PutBucketReplicationRequest$Bucket": "", - "PutBucketRequestPaymentRequest$Bucket": "", - "PutBucketTaggingRequest$Bucket": "", - "PutBucketVersioningRequest$Bucket": "", - "PutBucketWebsiteRequest$Bucket": "", - "PutObjectAclRequest$Bucket": "", + "PutBucketNotificationConfigurationRequest$Bucket": "The name of the bucket.
", + "PutBucketNotificationRequest$Bucket": "The name of the bucket.
", + "PutBucketPolicyRequest$Bucket": "The name of the bucket.
", + "PutBucketReplicationRequest$Bucket": "The name of the bucket
", + "PutBucketRequestPaymentRequest$Bucket": "The bucket name.
", + "PutBucketTaggingRequest$Bucket": "The bucket name.
", + "PutBucketVersioningRequest$Bucket": "The bucket name.
", + "PutBucketWebsiteRequest$Bucket": "The bucket name.
", + "PutObjectAclRequest$Bucket": "The name of the bucket to which the ACL is being added.
", "PutObjectLegalHoldRequest$Bucket": "The bucket containing the object that you want to place a Legal Hold on.
", - "PutObjectLockConfigurationRequest$Bucket": "The bucket whose object lock configuration you want to create or replace.
", + "PutObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to create or replace.
", "PutObjectRequest$Bucket": "Name of the bucket to which the PUT operation was initiated.
", "PutObjectRetentionRequest$Bucket": "The bucket that contains the object you want to apply this Object Retention configuration to.
", - "PutObjectTaggingRequest$Bucket": "", + "PutObjectTaggingRequest$Bucket": "The bucket containing the object.
", "PutPublicAccessBlockRequest$Bucket": "The name of the Amazon S3 bucket whose PublicAccessBlock
configuration you want to set.
The bucket name.
", "S3Location$BucketName": "The name of the bucket where the restore results will be placed.
", "SelectObjectContentRequest$Bucket": "The S3 bucket.
", - "UploadPartCopyRequest$Bucket": "", + "UploadPartCopyRequest$Bucket": "The bucket name.
", "UploadPartRequest$Bucket": "Name of the bucket to which the multipart upload was initiated.
" } }, @@ -430,15 +430,15 @@ "Buckets": { "base": null, "refs": { - "ListBucketsOutput$Buckets": "" + "ListBucketsOutput$Buckets": "The list of buckets owned by the requestor.
" } }, "BypassGovernanceRetention": { "base": null, "refs": { - "DeleteObjectRequest$BypassGovernanceRetention": "Indicates whether Amazon S3 object lock should bypass governance-mode restrictions to process this operation.
", - "DeleteObjectsRequest$BypassGovernanceRetention": "Specifies whether you want to delete this object even if it has a Governance-type object lock in place. You must have sufficient permissions to perform this operation.
", - "PutObjectRetentionRequest$BypassGovernanceRetention": "Indicates whether this operation should bypass Governance-mode restrictions.j
" + "DeleteObjectRequest$BypassGovernanceRetention": "Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation.
", + "DeleteObjectsRequest$BypassGovernanceRetention": "Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. You must have sufficient permissions to perform this operation.
", + "PutObjectRetentionRequest$BypassGovernanceRetention": "Indicates whether this operation should bypass Governance-mode restrictions.
" } }, "BytesProcessed": { @@ -465,7 +465,7 @@ "CORSConfiguration": { "base": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
", "refs": { - "PutBucketCorsRequest$CORSConfiguration": "" + "PutBucketCorsRequest$CORSConfiguration": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
" } }, "CORSRule": { @@ -477,18 +477,18 @@ "CORSRules": { "base": null, "refs": { - "CORSConfiguration$CORSRules": "A set of allowed origins and methods.
", - "GetBucketCorsOutput$CORSRules": "" + "CORSConfiguration$CORSRules": "A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.
", + "GetBucketCorsOutput$CORSRules": "A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.
" } }, "CSVInput": { - "base": "Describes how a CSV-formatted input object is formatted.
", + "base": "Describes how an uncompressed comma-separated values (CSV)-formatted input object is formatted.
", "refs": { "InputSerialization$CSV": "Describes the serialization of a CSV-encoded object.
" } }, "CSVOutput": { - "base": "Describes how CSV-formatted results are formatted.
", + "base": "Describes how uncompressed comma-separated values (CSV)-formatted results are formatted.
", "refs": { "OutputSerialization$CSV": "Describes the serialization of CSV-encoded Select results.
" } @@ -500,41 +500,41 @@ "CreateMultipartUploadRequest$CacheControl": "Specifies caching behavior along the request/reply chain.
", "GetObjectOutput$CacheControl": "Specifies caching behavior along the request/reply chain.
", "HeadObjectOutput$CacheControl": "Specifies caching behavior along the request/reply chain.
", - "PutObjectRequest$CacheControl": "Specifies caching behavior along the request/reply chain.
" + "PutObjectRequest$CacheControl": "Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.
" } }, "CloudFunction": { "base": null, "refs": { - "CloudFunctionConfiguration$CloudFunction": "" + "CloudFunctionConfiguration$CloudFunction": "Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.
" } }, "CloudFunctionConfiguration": { - "base": "", + "base": "Container for specifying the AWS Lambda notification configuration.
", "refs": { - "NotificationConfigurationDeprecated$CloudFunctionConfiguration": "" + "NotificationConfigurationDeprecated$CloudFunctionConfiguration": "Container for specifying the AWS Lambda notification configuration.
" } }, "CloudFunctionInvocationRole": { "base": null, "refs": { - "CloudFunctionConfiguration$InvocationRole": "" + "CloudFunctionConfiguration$InvocationRole": "The role supporting the invocation of the lambda function
" } }, "Code": { "base": null, "refs": { - "Error$Code": "" + "Error$Code": "The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.
Amazon S3 error codes
Code: AccessDenied
Description: Access Denied
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AccountProblem
Description: There is a problem with your AWS account that prevents the operation from completing successfully. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AllAccessDisabled
Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AmbiguousGrantByEmailAddress
Description: The email address you provided is associated with more than one account.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: AuthorizationHeaderMalformed
Description: The authorization header you provided is invalid.
HTTP Status Code: 400 Bad Request
HTTP Status Code: N/A
Code: BadDigest
Description: The Content-MD5 you specified did not match what we received.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: BucketAlreadyExists
Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: BucketAlreadyOwnedByYou
Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
Code: 409 Conflict (in all regions except the North Virginia region)
SOAP Fault Code Prefix: Client
Code: BucketNotEmpty
Description: The bucket you tried to delete is not empty.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: CredentialsNotSupported
Description: This request does not support credentials.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: CrossLocationLoggingProhibited
Description: Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: EntityTooLarge
Description: Your proposed upload exceeds the maximum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: ExpiredToken
Description: The provided token has expired.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IllegalVersioningConfigurationException
Description: Indicates that the versioning configuration specified in the request is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncompleteBody
Description: You did not provide the number of bytes specified by the Content-Length HTTP header
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncorrectNumberOfFilesInPostRequest
Description: POST requires exactly one file upload per request.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InlineDataTooLarge
Description: Inline data exceeds the maximum allowed size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InternalError
Description: We encountered an internal error. Please try again.
HTTP Status Code: 500 Internal Server Error
SOAP Fault Code Prefix: Server
Code: InvalidAccessKeyId
Description: The AWS access key ID you provided does not exist in our records.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidAddressingHeader
Description: You must specify the Anonymous role.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: InvalidArgument
Description: Invalid Argument
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketName
Description: The specified bucket is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketState
Description: The request is not valid with the current state of the bucket.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: InvalidDigest
Description: The Content-MD5 you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidEncryptionAlgorithmError
Description: The encryption request you specified is not valid. The valid value is AES256.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidLocationConstraint
Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidObjectState
Description: The operation is not valid for the current state of the object.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPartOrder
Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPayer
Description: All access to this object has been disabled. Please contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPolicyDocument
Description: The content of the form does not meet the conditions specified in the policy document.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRange
Description: The requested range cannot be satisfied.
HTTP Status Code: 416 Requested Range Not Satisfiable
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Please use AWS4-HMAC-SHA256.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: SOAP requests must be made over an HTTPS connection.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidSecurity
Description: The provided security credentials are not valid.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidSOAPRequest
Description: The SOAP request body is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidStorageClass
Description: The storage class you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidTargetBucketForLogging
Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidToken
Description: The provided token is malformed or otherwise invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidURI
Description: Couldn't parse the specified URI.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: KeyTooLongError
Description: Your key is too long.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedACLError
Description: The XML you provided was not well-formed or did not validate against our published schema.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedPOSTRequest
Description: The body of your POST request is not well-formed multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedXML
Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxMessageLengthExceeded
Description: Your request was too big.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxPostPreDataLengthExceededError
Description: Your POST request fields preceding the upload file were too large.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MetadataTooLarge
Description: Your metadata headers exceed the maximum allowed metadata size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MethodNotAllowed
Description: The specified method is not allowed against this resource.
HTTP Status Code: 405 Method Not Allowed
SOAP Fault Code Prefix: Client
Code: MissingAttachment
Description: A SOAP attachment was expected, but none were found.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: MissingContentLength
Description: You must provide the Content-Length HTTP header.
HTTP Status Code: 411 Length Required
SOAP Fault Code Prefix: Client
Code: MissingRequestBodyError
Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityElement
Description: The SOAP 1.1 request is missing a security element.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityHeader
Description: Your request is missing a required header.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoLoggingStatusForKey
Description: There is no such thing as a logging status subresource for a key.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoSuchBucket
Description: The specified bucket does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchBucketPolicy
Description: The specified bucket does not have a bucket policy.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchKey
Description: The specified key does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchVersion
Description: Indicates that the version ID specified in the request does not match an existing version.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NotImplemented
Description: A header you provided implies functionality that is not implemented.
HTTP Status Code: 501 Not Implemented
SOAP Fault Code Prefix: Server
Code: NotSignedUp
Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: OperationAborted
Description: A conflicting conditional operation is currently in progress against this resource. Try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: PermanentRedirect
Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.
HTTP Status Code: 301 Moved Permanently
SOAP Fault Code Prefix: Client
Code: PreconditionFailed
Description: At least one of the preconditions you specified did not hold.
HTTP Status Code: 412 Precondition Failed
SOAP Fault Code Prefix: Client
Code: Redirect
Description: Temporary redirect.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: RestoreAlreadyInProgress
Description: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: RequestIsNotMultiPartContent
Description: Bucket POST must be of the enclosure-type multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeout
Description: Your socket connection to the server was not read from or written to within the timeout period.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeTooSkewed
Description: The difference between the request time and the server's time is too large.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: RequestTorrentOfBucketError
Description: Requesting the torrent file of a bucket is not permitted.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: SignatureDoesNotMatch
Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication for details.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: ServiceUnavailable
Description: Reduce your request rate.
HTTP Status Code: 503 Service Unavailable
SOAP Fault Code Prefix: Server
Code: SlowDown
Description: Reduce your request rate.
HTTP Status Code: 503 Slow Down
SOAP Fault Code Prefix: Server
Code: TemporaryRedirect
Description: You are being redirected to the bucket while DNS updates.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: TokenRefreshRequired
Description: The provided token must be refreshed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: TooManyBuckets
Description: You have attempted to create more buckets than allowed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnexpectedContent
Description: This request does not support content.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnresolvableGrantByEmailAddress
Description: The email address you provided does not match any account on record.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UserKeyMustBeSpecified
Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
The single character used to indicate a row should be ignored when present at the start of a row.
" + "CSVInput$Comments": "A single character used to indicate that a row should be ignored when the character is present at the start of that row. You can specify any character to indicate a comment line.
" } }, "CommonPrefix": { - "base": "", + "base": "Container for all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter. CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix. For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/.
", "refs": { "CommonPrefixList$member": null } @@ -542,10 +542,10 @@ "CommonPrefixList": { "base": null, "refs": { - "ListMultipartUploadsOutput$CommonPrefixes": "", - "ListObjectVersionsOutput$CommonPrefixes": "", - "ListObjectsOutput$CommonPrefixes": "", - "ListObjectsV2Output$CommonPrefixes": "CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter
" + "ListMultipartUploadsOutput$CommonPrefixes": "If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a CommonPrefixes element. The distinct key prefixes are returned in the Prefix child element.
", + "ListObjectVersionsOutput$CommonPrefixes": "All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.
", + "ListObjectsOutput$CommonPrefixes": "All of the keys rolled up in a common prefix count as a single return when calculating the number of returns.
A response can contain CommonPrefixes only if you specify a delimiter.
CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.
CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.
For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.
", + "ListObjectsV2Output$CommonPrefixes": "All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.
A response can contain CommonPrefixes
only if you specify a delimiter.
CommonPrefixes
contains all (if there are any) keys between Prefix
and the next occurrence of the string specified by a delimiter.
CommonPrefixes
lists keys that act like subdirectories in the directory specified by Prefix
.
For example, if the prefix is notes/
and the delimiter is a slash (/
) as in notes/summer/july
, the common prefix is notes/summer/
. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.
The container for the completed multipart upload details.
", "refs": { - "CompleteMultipartUploadRequest$MultipartUpload": "" + "CompleteMultipartUploadRequest$MultipartUpload": "The container for the multipart upload request information.
" } }, "CompletedPart": { - "base": "", + "base": "Details of the parts that were uploaded.
", "refs": { "CompletedPartList$member": null } @@ -573,7 +573,7 @@ "CompletedPartList": { "base": null, "refs": { - "CompletedMultipartUpload$Parts": "" + "CompletedMultipartUpload$Parts": "Array of CompletedPart data types.
" } }, "CompressionType": { @@ -583,7 +583,7 @@ } }, "Condition": { - "base": "Specifies a condition that must be met for a redirect to apply.
", + "base": "A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs
folder, redirect to the /documents
folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.
A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs
folder, redirect to the /documents
folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.
Specifies presentational information for the object.
", "GetObjectOutput$ContentDisposition": "Specifies presentational information for the object.
", "HeadObjectOutput$ContentDisposition": "Specifies presentational information for the object.
", - "PutObjectRequest$ContentDisposition": "Specifies presentational information for the object.
" + "PutObjectRequest$ContentDisposition": "Specifies presentational information for the object. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1.
" } }, "ContentEncoding": { @@ -611,7 +611,7 @@ "CreateMultipartUploadRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
", "GetObjectOutput$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
", "HeadObjectOutput$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
", - "PutObjectRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
" + "PutObjectRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11.
" } }, "ContentLanguage": { @@ -629,31 +629,31 @@ "refs": { "GetObjectOutput$ContentLength": "Size of the body in bytes.
", "HeadObjectOutput$ContentLength": "Size of the body in bytes.
", - "PutObjectRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.
", + "PutObjectRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13.
", "UploadPartRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.
" } }, "ContentMD5": { "base": null, "refs": { - "PutBucketAclRequest$ContentMD5": "", - "PutBucketCorsRequest$ContentMD5": "", + "PutBucketAclRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.
", + "PutBucketCorsRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.
", "PutBucketEncryptionRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. This parameter is auto-populated when using the command from the CLI.
", "PutBucketLifecycleRequest$ContentMD5": "", - "PutBucketLoggingRequest$ContentMD5": "", - "PutBucketNotificationRequest$ContentMD5": "", - "PutBucketPolicyRequest$ContentMD5": "", - "PutBucketReplicationRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit.
", - "PutBucketRequestPaymentRequest$ContentMD5": "", - "PutBucketTaggingRequest$ContentMD5": "", - "PutBucketVersioningRequest$ContentMD5": "", - "PutBucketWebsiteRequest$ContentMD5": "", - "PutObjectAclRequest$ContentMD5": "", + "PutBucketLoggingRequest$ContentMD5": "The MD5 hash of the PutBucketLogging
request body.
The MD5 hash of the PutPublicAccessBlock
request body.
The MD5 hash of the request body.
", + "PutBucketReplicationRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
", + "PutBucketRequestPaymentRequest$ContentMD5": ">The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
", + "PutBucketTaggingRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
", + "PutBucketVersioningRequest$ContentMD5": ">The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
", + "PutBucketWebsiteRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
", + "PutObjectAclRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.>
", "PutObjectLegalHoldRequest$ContentMD5": "The MD5 hash for the request body.
", "PutObjectLockConfigurationRequest$ContentMD5": "The MD5 hash for the request body.
", - "PutObjectRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameted is required if object lock parameters are specified.
", + "PutObjectRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
", "PutObjectRetentionRequest$ContentMD5": "The MD5 hash for the request body.
", - "PutObjectTaggingRequest$ContentMD5": "", + "PutObjectTaggingRequest$ContentMD5": "The MD5 hash for the request body.
", "PutPublicAccessBlockRequest$ContentMD5": "The MD5 hash of the PutPublicAccessBlock
request body.
The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameted is required if object lock parameters are specified.
" } @@ -671,7 +671,7 @@ "CreateMultipartUploadRequest$ContentType": "A standard MIME type describing the format of the object data.
", "GetObjectOutput$ContentType": "A standard MIME type describing the format of the object data.
", "HeadObjectOutput$ContentType": "A standard MIME type describing the format of the object data.
", - "PutObjectRequest$ContentType": "A standard MIME type describing the format of the object data.
" + "PutObjectRequest$ContentType": "A standard MIME type describing the format of the contents. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17.
" } }, "ContinuationEvent": { @@ -691,15 +691,15 @@ } }, "CopyObjectResult": { - "base": "", + "base": ">Container for all response elements.
", "refs": { - "CopyObjectOutput$CopyObjectResult": "" + "CopyObjectOutput$CopyObjectResult": "Container for all response elements.
" } }, "CopyPartResult": { - "base": "", + "base": "Container for all response elements.
", "refs": { - "UploadPartCopyOutput$CopyPartResult": "" + "UploadPartCopyOutput$CopyPartResult": "Container for all response elements.
" } }, "CopySource": { @@ -767,14 +767,14 @@ "CopySourceVersionId": { "base": null, "refs": { - "CopyObjectOutput$CopySourceVersionId": "", + "CopyObjectOutput$CopySourceVersionId": "Version of the copied object in the destination bucket.
", "UploadPartCopyOutput$CopySourceVersionId": "The version of the source object that was copied, if you have enabled versioning on the source bucket.
" } }, "CreateBucketConfiguration": { - "base": "", + "base": "The configuration information for the bucket.
", "refs": { - "CreateBucketRequest$CreateBucketConfiguration": "" + "CreateBucketRequest$CreateBucketConfiguration": "The configuration information for the bucket.
" } }, "CreateBucketOutput": { @@ -807,7 +807,7 @@ "base": null, "refs": { "LifecycleExpiration$Date": "Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.
", - "ObjectLockRetention$RetainUntilDate": "The date on which this object lock retention expires.
", + "ObjectLockRetention$RetainUntilDate": "The date on which this Object Lock Retention will expire.
", "Transition$Date": "Indicates when objects are transitioned to the specified storage class. The date value must be in ISO 8601 format. The time is always midnight UTC.
" } }, @@ -829,15 +829,15 @@ } }, "DefaultRetention": { - "base": "The container element for specifying the default object lock retention settings for new objects placed in the specified bucket.
", + "base": "The container element for specifying the default Object Lock retention settings for new objects placed in the specified bucket.
", "refs": { "ObjectLockRule$DefaultRetention": "The default retention period that you want to apply to new objects placed in the specified bucket.
" } }, "Delete": { - "base": "", + "base": "Container for the objects to delete.
", "refs": { - "DeleteObjectsRequest$Delete": "" + "DeleteObjectsRequest$Delete": "Container for the request.
" } }, "DeleteBucketAnalyticsConfigurationRequest": { @@ -899,39 +899,39 @@ "base": null, "refs": { "DeleteObjectOutput$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.
", - "DeletedObject$DeleteMarker": "", + "DeletedObject$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created.
", "GetObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.
", "HeadObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.
" } }, "DeleteMarkerEntry": { - "base": "", + "base": "Information about the delete marker.
", "refs": { "DeleteMarkers$member": null } }, "DeleteMarkerReplication": { - "base": "Specifies whether Amazon S3 should replicate delete makers.
", + "base": "Specifies whether Amazon S3 replicates the delete markers. If you specify a Filter
, you must specify this element. However, in the latest version of replication configuration (when Filter
is specified), Amazon S3 doesn't replicate delete markers. Therefore, the DeleteMarkerReplication
element can contain only <Status>Disabled</Status>. For an example configuration, see Basic Rule Configuration.
If you don't specify the Filter element, Amazon S3 assumes the replication configuration is the earlier version, V1. In the earlier version, Amazon S3 handled replication of delete markers differently. For more information, see Backward Compatibility.
The status of the delete marker replication.
In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled
.
Indicates whether to replicate delete markers.
In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled
.
The version ID of the delete marker created as a result of the DELETE operation. If you delete a specific object version, the value returned by this header is the version ID of the object version deleted.
" } }, "DeleteMarkers": { "base": null, "refs": { - "ListObjectVersionsOutput$DeleteMarkers": "" + "ListObjectVersionsOutput$DeleteMarkers": "Container for an object that is a delete marker.
" } }, "DeleteObjectOutput": { @@ -970,7 +970,7 @@ } }, "DeletedObject": { - "base": "", + "base": "Information about the deleted object.
", "refs": { "DeletedObjects$member": null } @@ -978,19 +978,19 @@ "DeletedObjects": { "base": null, "refs": { - "DeleteObjectsOutput$Deleted": "" + "DeleteObjectsOutput$Deleted": "Container element for a successful delete. It identifies the object that was successfully deleted.
" } }, "Delimiter": { "base": null, "refs": { - "ListMultipartUploadsOutput$Delimiter": "", - "ListMultipartUploadsRequest$Delimiter": "Character you use to group keys.
", - "ListObjectVersionsOutput$Delimiter": "", - "ListObjectVersionsRequest$Delimiter": "A delimiter is a character you use to group keys.
", - "ListObjectsOutput$Delimiter": "", + "ListMultipartUploadsOutput$Delimiter": "Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element is absent from the response.
", + "ListMultipartUploadsRequest$Delimiter": "Character you use to group keys.
All keys that contain the same string between the prefix, if specified, and the first occurrence of the delimiter after the prefix are grouped under a single result element, CommonPrefixes
. If you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are grouped under CommonPrefixes
result element are not returned elsewhere in the response.
The delimiter grouping the included keys. A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response.
", + "ListObjectVersionsRequest$Delimiter": "A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix
and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response.
Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value.
", "ListObjectsRequest$Delimiter": "A delimiter is a character you use to group keys.
", - "ListObjectsV2Output$Delimiter": "A delimiter is a character you use to group keys.
", + "ListObjectsV2Output$Delimiter": "Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value.
", "ListObjectsV2Request$Delimiter": "A delimiter is a character you use to group keys.
" } }, @@ -1011,20 +1011,20 @@ "refs": { "Grantee$DisplayName": "Screen name of the grantee.
", "Initiator$DisplayName": "Name of the Principal.
", - "Owner$DisplayName": "" + "Owner$DisplayName": "Container for the display name of the owner
" } }, "ETag": { "base": null, "refs": { - "CompleteMultipartUploadOutput$ETag": "Entity tag of the object.
", + "CompleteMultipartUploadOutput$ETag": "Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits.
", "CompletedPart$ETag": "Entity tag returned when the part was uploaded.
", - "CopyObjectResult$ETag": "", + "CopyObjectResult$ETag": "Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETag is identical for a successfully copied object.
", "CopyPartResult$ETag": "Entity tag of the object.
", "GetObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL
", "HeadObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL
", - "Object$ETag": "", - "ObjectVersion$ETag": "", + "Object$ETag": "The entity tag is an MD5 hash of the object. ETag reflects only changes to the contents of an object, not its metadata.
", + "ObjectVersion$ETag": "The entity tag is an MD5 hash of that version of the object
", "Part$ETag": "Entity tag returned when the part was uploaded.
", "PutObjectOutput$ETag": "Entity tag for the uploaded object.
", "UploadPartOutput$ETag": "Entity tag for the uploaded object.
" @@ -1045,20 +1045,20 @@ "EncodingType": { "base": "Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.
", "refs": { - "ListMultipartUploadsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.
", + "ListMultipartUploadsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.
If you specify encoding-type
request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:
Delimiter
, KeyMarker
, Prefix
, NextKeyMarker
, Key
.
Encoding type used by Amazon S3 to encode object keys in the response.
", + "ListObjectVersionsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object key names in the XML response.
If you specify encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:
KeyMarker, NextKeyMarker, Prefix, Key
, and Delimiter
.
Encoding type used by Amazon S3 to encode object keys in the response.
", "ListObjectsRequest$EncodingType": null, - "ListObjectsV2Output$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.
", + "ListObjectsV2Output$EncodingType": "Encoding type used by Amazon S3 to encode object key names in the XML response.
If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:
Delimiter, Prefix, Key,
and StartAfter
.
Encoding type used by Amazon S3 to encode object keys in the response.
" } }, "Encryption": { - "base": "Describes the server-side encryption that will be applied to the restore results.
", + "base": "Contains the type of server-side encryption used.
", "refs": { - "S3Location$Encryption": "" + "S3Location$Encryption": null } }, "EncryptionConfiguration": { @@ -1070,32 +1070,32 @@ "End": { "base": null, "refs": { - "ScanRange$End": "Specifies the end of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is one less than the size of the object being queried.
" + "ScanRange$End": "Specifies the end of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is one less than the size of the object being queried. If only the End parameter is supplied, it is interpreted to mean scan the last N bytes of the file. For example; <scanrange><end>50</end></scanrange>
means scan the last 50 bytes.
A message that indicates the request is complete and no more messages will be sent. You should not assume that the request is complete until the client receives an EndEvent
.
The End Event.
" } }, "Error": { - "base": "", + "base": "Container for all error elements.
", "refs": { "Errors$member": null } }, "ErrorDocument": { - "base": "", + "base": "The error information.
", "refs": { - "GetBucketWebsiteOutput$ErrorDocument": "", + "GetBucketWebsiteOutput$ErrorDocument": "The name of the error document for the website.
", "WebsiteConfiguration$ErrorDocument": "The name of the error document for the website.
" } }, "Errors": { "base": null, "refs": { - "DeleteObjectsOutput$Errors": "" + "DeleteObjectsOutput$Errors": "Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.
" } }, "Event": { @@ -1110,12 +1110,24 @@ "EventList": { "base": null, "refs": { - "CloudFunctionConfiguration$Events": "", + "CloudFunctionConfiguration$Events": "Bucket events for which to send notifications.
", "LambdaFunctionConfiguration$Events": "The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.
", - "QueueConfiguration$Events": "", - "QueueConfigurationDeprecated$Events": "", + "QueueConfiguration$Events": "A collection of bucket events for which to send notiications
", + "QueueConfigurationDeprecated$Events": "A collection of bucket events for which to send notiications
", "TopicConfiguration$Events": "The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.
", - "TopicConfigurationDeprecated$Events": "" + "TopicConfigurationDeprecated$Events": "A collection of events related to objects
" + } + }, + "ExistingObjectReplication": { + "base": "A container that specifies information about existing object replication. You can choose whether to enable or disable the replication of existing objects.
", + "refs": { + "ReplicationRule$ExistingObjectReplication": "A container that specifies information about existing object replication. You can choose whether to enable or disable the replication of existing objects.
" + } + }, + "ExistingObjectReplicationStatus": { + "base": null, + "refs": { + "ExistingObjectReplication$Status": "Specifies whether existing object replication is enabled.
" } }, "Expiration": { @@ -1125,7 +1137,7 @@ "CopyObjectOutput$Expiration": "If the object expiration is configured, the response includes this header.
", "GetObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.
", "HeadObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.
", - "PutObjectOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
" + "PutObjectOutput$Expiration": "If the expiration is configured for the object (see PutBucketLifecycleConfiguration), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL encoded.
" } }, "ExpirationStatus": { @@ -1148,7 +1160,7 @@ "CreateMultipartUploadRequest$Expires": "The date and time at which the object is no longer cacheable.
", "GetObjectOutput$Expires": "The date and time at which the object is no longer cacheable.
", "HeadObjectOutput$Expires": "The date and time at which the object is no longer cacheable.
", - "PutObjectRequest$Expires": "The date and time at which the object is no longer cacheable.
" + "PutObjectRequest$Expires": "The date and time at which the object is no longer cacheable. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21.
" } }, "ExposeHeader": { @@ -1186,14 +1198,14 @@ "FieldDelimiter": { "base": null, "refs": { - "CSVInput$FieldDelimiter": "The value used to separate individual fields in a record.
", - "CSVOutput$FieldDelimiter": "The value used to separate individual fields in a record.
" + "CSVInput$FieldDelimiter": "A single character used to separate individual fields in a record. You can specify an arbitrary delimiter.
", + "CSVOutput$FieldDelimiter": "The value used to separate individual fields in a record. You can specify an arbitrary delimiter.
" } }, "FileHeaderInfo": { "base": null, "refs": { - "CSVInput$FileHeaderInfo": "Describes the first line of input. Valid values: None, Ignore, Use.
" + "CSVInput$FileHeaderInfo": "Describes the first line of input. Valid values are:
NONE
: First line is not a header.
IGNORE
: First line is a header, but you can't use the header values to indicate the column in an expression. You can use column position (such as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s
).
Use
: First line is a header, and you can use the header value to identify a column in an expression (SELECT \"name\" FROM OBJECT
).
A list of containers for the key value pair that defines the criteria for the filter rule.
", "refs": { - "S3KeyFilter$FilterRules": "" + "S3KeyFilter$FilterRules": null } }, "FilterRuleName": { @@ -1486,13 +1498,13 @@ } }, "GlacierJobParameters": { - "base": "", + "base": "Container for Glacier job parameters.
", "refs": { "RestoreRequest$GlacierJobParameters": "Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.
" } }, "Grant": { - "base": "", + "base": "Container for grant information.
", "refs": { "Grants$member": null } @@ -1550,10 +1562,10 @@ } }, "Grantee": { - "base": "", + "base": "Container for the person being granted permissions.
", "refs": { - "Grant$Grantee": "", - "TargetGrant$Grantee": "" + "Grant$Grantee": "The person being granted permissions.
", + "TargetGrant$Grantee": "Container for the person being granted permissions.
" } }, "Grants": { @@ -1605,7 +1617,7 @@ "Grantee$ID": "The canonical user ID of the grantee.
", "Initiator$ID": "If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.
", "LifecycleRule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters.
", - "Owner$ID": "", + "Owner$ID": "Container for the ID of the owner
", "ReplicationRule$ID": "A unique identifier for the rule. The maximum value is 255 characters.
", "Rule$ID": "Unique identifier for the rule. The value can't be longer than 255 characters.
" } @@ -1639,9 +1651,9 @@ } }, "IndexDocument": { - "base": "", + "base": "Container for the Suffix element.
", "refs": { - "GetBucketWebsiteOutput$IndexDocument": "", + "GetBucketWebsiteOutput$IndexDocument": "The name of the index document for the website.
", "WebsiteConfiguration$IndexDocument": "The name of the index document for the website.
" } }, @@ -1652,9 +1664,9 @@ } }, "Initiator": { - "base": "", + "base": "Container element that identifies who initiated the ultipart upload.
", "refs": { - "ListPartsOutput$Initiator": "Identifies who initiated the multipart upload.
", + "ListPartsOutput$Initiator": "Container element that identifies who initiated the multipart upload. If the initiator is an AWS account, this element provides the same information as the Owner element. If the initiator is an IAM User, then this element provides the user ARN and display name.
", "MultipartUpload$Initiator": "Identifies who initiated the multipart upload.
" } }, @@ -1680,7 +1692,7 @@ } }, "InventoryDestination": { - "base": "", + "base": "Specifies the inventory configuration for an Amazon S3 bucket.
", "refs": { "InventoryConfiguration$Destination": "Contains information about where to publish the inventory results.
" } @@ -1692,7 +1704,7 @@ } }, "InventoryFilter": { - "base": "", + "base": "Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria.
", "refs": { "InventoryConfiguration$Filter": "Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria.
" } @@ -1737,13 +1749,13 @@ } }, "InventoryS3BucketDestination": { - "base": "", + "base": "Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.
", "refs": { "InventoryDestination$S3BucketDestination": "Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.
" } }, "InventorySchedule": { - "base": "", + "base": "Specifies the schedule for generating inventory results.
", "refs": { "InventoryConfiguration$Schedule": "Specifies the schedule for generating inventory results.
" } @@ -1771,23 +1783,23 @@ "base": null, "refs": { "ListBucketAnalyticsConfigurationsOutput$IsTruncated": "Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.
", - "ListBucketInventoryConfigurationsOutput$IsTruncated": "Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated.
", + "ListBucketInventoryConfigurationsOutput$IsTruncated": "Tells whether the returned list of inventory configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken is provided for a subsequent request.
", "ListBucketMetricsConfigurationsOutput$IsTruncated": "Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.
", "ListMultipartUploadsOutput$IsTruncated": "Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.
", "ListObjectVersionsOutput$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.
", "ListObjectsOutput$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.
", - "ListObjectsV2Output$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.
", - "ListPartsOutput$IsTruncated": "Indicates whether the returned list of parts is truncated.
" + "ListObjectsV2Output$IsTruncated": "Set to false if all of the results were returned. Set to true if more keys are available to return. If the number of results exceeds that specified by MaxKeys, all of the results might not be returned.
", + "ListPartsOutput$IsTruncated": "Indicates whether the returned list of parts is truncated. A true value indicates that the list was truncated. A list can be truncated if the number of parts exceeds the limit returned in the MaxParts element.
" } }, "JSONInput": { - "base": "", + "base": "Specifies JSON as object's input serialization format.
", "refs": { "InputSerialization$JSON": "Specifies JSON as object's input serialization format.
" } }, "JSONOutput": { - "base": "", + "base": "Specifies JSON as request's output serialization format.
", "refs": { "OutputSerialization$JSON": "Specifies JSON as request's output serialization format.
" } @@ -1814,7 +1826,7 @@ "base": null, "refs": { "ListMultipartUploadsOutput$KeyMarker": "The key at or after which the listing began.
", - "ListMultipartUploadsRequest$KeyMarker": "Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.
", + "ListMultipartUploadsRequest$KeyMarker": "Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.
If upload-id-marker
is not specified, only the keys lexicographically greater than the specified key-marker
will be included in the list.
If upload-id-marker
is specified, any multipart uploads for a key equal to the key-marker
might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker
.
Marks the last Key returned in a truncated response.
", "ListObjectVersionsRequest$KeyMarker": "Specifies the key to start with when listing objects in a bucket.
" } @@ -1846,31 +1858,31 @@ "LastModified": { "base": null, "refs": { - "CopyObjectResult$LastModified": "", + "CopyObjectResult$LastModified": "Returns the date that the object was last modified.
", "CopyPartResult$LastModified": "Date and time at which the object was uploaded.
", "DeleteMarkerEntry$LastModified": "Date and time the object was last modified.
", "GetObjectOutput$LastModified": "Last modified date of the object
", "HeadObjectOutput$LastModified": "Last modified date of the object
", - "Object$LastModified": "", + "Object$LastModified": "The date the Object was Last Modified
", "ObjectVersion$LastModified": "Date and time the object was last modified.
", "Part$LastModified": "Date and time at which the part was uploaded.
" } }, "LifecycleConfiguration": { - "base": "", + "base": "Container for lifecycle rules. You can add as many as 1000 rules.
", "refs": { "PutBucketLifecycleRequest$LifecycleConfiguration": "" } }, "LifecycleExpiration": { - "base": "", + "base": "Container for the expiration for the lifecycle of the object.
", "refs": { - "LifecycleRule$Expiration": "", - "Rule$Expiration": "" + "LifecycleRule$Expiration": "Specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker.
", + "Rule$Expiration": "Specifies the expiration for the lifecycle of the object.
" } }, "LifecycleRule": { - "base": "", + "base": "A lifecycle rule for individual objects in an Amazon S3 bucket.
", "refs": { "LifecycleRules$member": null } @@ -1878,20 +1890,20 @@ "LifecycleRuleAndOperator": { "base": "This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator.
", "refs": { - "LifecycleRuleFilter$And": "" + "LifecycleRuleFilter$And": null } }, "LifecycleRuleFilter": { "base": "The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.
", "refs": { - "LifecycleRule$Filter": "" + "LifecycleRule$Filter": null } }, "LifecycleRules": { "base": null, "refs": { "BucketLifecycleConfiguration$Rules": "A lifecycle rule for individual objects in an Amazon S3 bucket.
", - "GetBucketLifecycleConfigurationOutput$Rules": "" + "GetBucketLifecycleConfigurationOutput$Rules": "Container for a lifecycle rule.
" } }, "ListBucketAnalyticsConfigurationsOutput": { @@ -1982,8 +1994,8 @@ "Location": { "base": null, "refs": { - "CompleteMultipartUploadOutput$Location": "", - "CreateBucketOutput$Location": "" + "CompleteMultipartUploadOutput$Location": "The URI that identifies the newly created object.
", + "CreateBucketOutput$Location": "Specifies the region where the bucket will be created. If you are creating a bucket on the US East (N. Virginia) region (us-east-1), you do not need to specify the location.
" } }, "LocationPrefix": { @@ -1995,15 +2007,15 @@ "LoggingEnabled": { "base": "Describes where logs are stored and the prefix that Amazon S3 assigns to all log object keys for a bucket. For more information, see PUT Bucket logging in the Amazon Simple Storage Service API Reference.
", "refs": { - "BucketLoggingStatus$LoggingEnabled": "", - "GetBucketLoggingOutput$LoggingEnabled": "" + "BucketLoggingStatus$LoggingEnabled": null, + "GetBucketLoggingOutput$LoggingEnabled": null } }, "MFA": { "base": null, "refs": { - "DeleteObjectRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.
", - "DeleteObjectsRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.
", + "DeleteObjectRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versionedobject if versioning is configured with MFA Deleteenabled.
", + "DeleteObjectsRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA Delete enabled.
", "PutBucketVersioningRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.
" } }, @@ -2022,7 +2034,7 @@ "Marker": { "base": null, "refs": { - "ListObjectsOutput$Marker": "", + "ListObjectsOutput$Marker": "Indicates where in the bucket listing begins. Marker is included in the response if it was sent with the request.
", "ListObjectsRequest$Marker": "Specifies the key to start with when listing objects in a bucket.
" } }, @@ -2035,9 +2047,9 @@ "MaxKeys": { "base": null, "refs": { - "ListObjectVersionsOutput$MaxKeys": "", - "ListObjectVersionsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.
", - "ListObjectsOutput$MaxKeys": "", + "ListObjectVersionsOutput$MaxKeys": "Specifies the maximum number of objects to return.
", + "ListObjectVersionsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.
", + "ListObjectsOutput$MaxKeys": "The maximum number of keys returned in the response body.
", "ListObjectsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.
", "ListObjectsV2Output$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.
", "ListObjectsV2Request$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.
" @@ -2060,7 +2072,7 @@ "Message": { "base": null, "refs": { - "Error$Message": "" + "Error$Message": "The error message contains a generic description of the error condition in English. It is intended for a human audience. Simple programs display the message directly to the end user if they encounter an error condition they don't know how or don't care to handle. Sophisticated programs with more exhaustive error handling and proper internationalization are more likely to ignore the error message.
" } }, "Metadata": { @@ -2089,18 +2101,24 @@ "base": null, "refs": { "Metadata$key": null, - "MetadataEntry$Name": "" + "MetadataEntry$Name": "Name of the Object.
" } }, "MetadataValue": { "base": null, "refs": { "Metadata$value": null, - "MetadataEntry$Value": "" + "MetadataEntry$Value": "Value of the Object.
" + } + }, + "Metrics": { + "base": " A container specifying replication metrics-related information, including whether emitting metrics and Amazon S3 events for replication are enabled. In addition, contains configurations related to specific metrics or events. Must be specified together with a ReplicationTime
block.
A container specifying replication metrics-related information, including whether emitting metrics and Amazon S3 events for replication are enabled. In addition, contains configurations related to specific metrics or events. Must be specified together with a ReplicationTime
block.
A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.
", "refs": { "MetricsFilter$And": "A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.
" } @@ -2120,7 +2138,7 @@ } }, "MetricsFilter": { - "base": "", + "base": "Specifies a metrics configuration filter. The metrics configuration only includes objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).
", "refs": { "MetricsConfiguration$Filter": "Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).
" } @@ -2134,6 +2152,18 @@ "PutBucketMetricsConfigurationRequest$Id": "The ID used to identify the metrics configuration.
" } }, + "MetricsStatus": { + "base": null, + "refs": { + "Metrics$Status": "Specifies whether the replication metrics are enabled.
" + } + }, + "Minutes": { + "base": null, + "refs": { + "ReplicationTimeValue$Minutes": "Contains an integer specifying time in minutes.
" + } + }, "MissingMeta": { "base": null, "refs": { @@ -2142,7 +2172,7 @@ } }, "MultipartUpload": { - "base": "", + "base": "Container for the MultipartUpload for the Amazon S3 object.
", "refs": { "MultipartUploadList$member": null } @@ -2151,7 +2181,7 @@ "base": null, "refs": { "AbortMultipartUploadRequest$UploadId": "Upload ID that identifies the multipart upload.
", - "CompleteMultipartUploadRequest$UploadId": "", + "CompleteMultipartUploadRequest$UploadId": "ID for the initiated multipart upload.
", "CreateMultipartUploadOutput$UploadId": "ID for the initiated multipart upload.
", "ListPartsOutput$UploadId": "Upload ID identifying the multipart upload whose parts are being listed.
", "ListPartsRequest$UploadId": "Upload ID identifying the multipart upload whose parts are being listed.
", @@ -2163,14 +2193,14 @@ "MultipartUploadList": { "base": null, "refs": { - "ListMultipartUploadsOutput$Uploads": "" + "ListMultipartUploadsOutput$Uploads": "Container for elements related to a particular multipart upload. A response can contain zero or more Upload elements.
" } }, "NextKeyMarker": { "base": null, "refs": { "ListMultipartUploadsOutput$NextKeyMarker": "When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.
", - "ListObjectVersionsOutput$NextKeyMarker": "Use this value for the key marker request parameter in a subsequent request.
" + "ListObjectVersionsOutput$NextKeyMarker": "When the number of responses exceeds the value of MaxKeys, NextKeyMarker specifies the first key not returned that satisfies the search criteria. Use this value for the key-marker request parameter in a subsequent request.
" } }, "NextMarker": { @@ -2203,7 +2233,7 @@ "NextVersionIdMarker": { "base": null, "refs": { - "ListObjectVersionsOutput$NextVersionIdMarker": "Use this value for the next version id marker parameter in a subsequent request.
" + "ListObjectVersionsOutput$NextVersionIdMarker": "When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker specifies the first object version not returned that satisfies the search criteria. Use this value for the version-id-marker request parameter in a subsequent request.
" } }, "NoSuchBucket": { @@ -2224,41 +2254,41 @@ "NoncurrentVersionExpiration": { "base": "Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.
", "refs": { - "LifecycleRule$NoncurrentVersionExpiration": "", - "Rule$NoncurrentVersionExpiration": "" + "LifecycleRule$NoncurrentVersionExpiration": null, + "Rule$NoncurrentVersionExpiration": null } }, "NoncurrentVersionTransition": { "base": "Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA
, ONEZONE_IA
, INTELLIGENT_TIERING
, GLACIER
, or DEEP_ARCHIVE
storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
, ONEZONE_IA
, INTELLIGENT_TIERING
, GLACIER
, or DEEP_ARCHIVE
storage class at a specific period in the object's lifetime.
Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to the a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the a specifc storage class at a set period in the object's lifetime.
" } }, "NotificationConfiguration": { "base": "A container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off for the bucket.
", "refs": { - "PutBucketNotificationConfigurationRequest$NotificationConfiguration": "" + "PutBucketNotificationConfigurationRequest$NotificationConfiguration": null } }, "NotificationConfigurationDeprecated": { "base": null, "refs": { - "PutBucketNotificationRequest$NotificationConfiguration": "" + "PutBucketNotificationRequest$NotificationConfiguration": "The container for the configuration.
" } }, "NotificationConfigurationFilter": { "base": "Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.
", "refs": { - "LambdaFunctionConfiguration$Filter": "", - "QueueConfiguration$Filter": "", - "TopicConfiguration$Filter": "" + "LambdaFunctionConfiguration$Filter": null, + "QueueConfiguration$Filter": null, + "TopicConfiguration$Filter": null } }, "NotificationId": { @@ -2273,7 +2303,7 @@ } }, "Object": { - "base": "", + "base": "An object consists of data and its descriptive metadata.
", "refs": { "ObjectList$member": null } @@ -2288,13 +2318,13 @@ "refs": { "CopyObjectRequest$ACL": "The canned ACL to apply to the object.
", "CreateMultipartUploadRequest$ACL": "The canned ACL to apply to the object.
", - "PutObjectAclRequest$ACL": "The canned ACL to apply to the object.
", - "PutObjectRequest$ACL": "The canned ACL to apply to the object.
", + "PutObjectAclRequest$ACL": "The canned ACL to apply to the object. For more information, see Canned ACL
", + "PutObjectRequest$ACL": "The canned ACL to apply to the object. For more information, see Canned ACL.
", "S3Location$CannedACL": "The canned ACL to apply to the restore results.
" } }, "ObjectIdentifier": { - "base": "", + "base": "Object Identifier is unique value to identify objects.
", "refs": { "ObjectIdentifierList$member": null } @@ -2302,73 +2332,73 @@ "ObjectIdentifierList": { "base": null, "refs": { - "Delete$Objects": "" + "Delete$Objects": "The objects to delete.
" } }, "ObjectKey": { "base": null, "refs": { "AbortMultipartUploadRequest$Key": "Key of the object for which the multipart upload was initiated.
", - "CompleteMultipartUploadOutput$Key": "", - "CompleteMultipartUploadRequest$Key": "", - "CopyObjectRequest$Key": "", + "CompleteMultipartUploadOutput$Key": "The object key of the newly created object.
", + "CompleteMultipartUploadRequest$Key": "Object key for which the multipart upload was initiated.
", + "CopyObjectRequest$Key": "The key of the destination object.
", "CreateMultipartUploadOutput$Key": "Object key for which the multipart upload was initiated.
", - "CreateMultipartUploadRequest$Key": "", + "CreateMultipartUploadRequest$Key": "Object key for which the multipart upload is to be initiated.
", "DeleteMarkerEntry$Key": "The object key.
", - "DeleteObjectRequest$Key": "", - "DeleteObjectTaggingRequest$Key": "", - "DeletedObject$Key": "", - "Error$Key": "", + "DeleteObjectRequest$Key": "Key name of the object to delete.
", + "DeleteObjectTaggingRequest$Key": "Name of the tag.
", + "DeletedObject$Key": "The name of the deleted object.
", + "Error$Key": "The error key.
", "ErrorDocument$Key": "The object key name to use when a 4XX class error occurs.
", - "GetObjectAclRequest$Key": "", + "GetObjectAclRequest$Key": "The key of the object for which to get the ACL information.
", "GetObjectLegalHoldRequest$Key": "The key name for the object whose Legal Hold status you want to retrieve.
", - "GetObjectRequest$Key": "", + "GetObjectRequest$Key": "Key of the object to get.
", "GetObjectRetentionRequest$Key": "The key name for the object whose retention settings you want to retrieve.
", - "GetObjectTaggingRequest$Key": "", - "GetObjectTorrentRequest$Key": "", - "HeadObjectRequest$Key": "", + "GetObjectTaggingRequest$Key": "Object key for which to get the tagging information.
", + "GetObjectTorrentRequest$Key": "The object key for which to get the information.
", + "HeadObjectRequest$Key": "The object key.
", "ListPartsOutput$Key": "Object key for which the multipart upload was initiated.
", - "ListPartsRequest$Key": "", + "ListPartsRequest$Key": "Object key for which the multipart upload was initiated.
", "MultipartUpload$Key": "Key of the object for which the multipart upload was initiated.
", - "Object$Key": "", + "Object$Key": "The name that you assign to an object. You use the object key to retrieve the object.
", "ObjectIdentifier$Key": "Key name of the object to delete.
", "ObjectVersion$Key": "The object key.
", - "PutObjectAclRequest$Key": "", + "PutObjectAclRequest$Key": "Key for which the PUT operation was initiated.
", "PutObjectLegalHoldRequest$Key": "The key name for the object that you want to place a Legal Hold on.
", "PutObjectRequest$Key": "Object key for which the PUT operation was initiated.
", "PutObjectRetentionRequest$Key": "The key name for the object that you want to apply this Object Retention configuration to.
", - "PutObjectTaggingRequest$Key": "", - "RestoreObjectRequest$Key": "", + "PutObjectTaggingRequest$Key": "Name of the tag.
", + "RestoreObjectRequest$Key": "Object key for which the operation was initiated.
", "SelectObjectContentRequest$Key": "The object key.
", "Tag$Key": "Name of the tag.
", - "UploadPartCopyRequest$Key": "", + "UploadPartCopyRequest$Key": "Object key for which the multipart upload was initiated.
", "UploadPartRequest$Key": "Object key for which the multipart upload was initiated.
" } }, "ObjectList": { "base": null, "refs": { - "ListObjectsOutput$Contents": "", + "ListObjectsOutput$Contents": "Metadata about each object returned.
", "ListObjectsV2Output$Contents": "Metadata about each object returned.
" } }, "ObjectLockConfiguration": { - "base": "The container element for object lock configuration parameters.
", + "base": "The container element for Object Lock configuration parameters.
", "refs": { - "GetObjectLockConfigurationOutput$ObjectLockConfiguration": "The specified bucket's object lock configuration.
", - "PutObjectLockConfigurationRequest$ObjectLockConfiguration": "The object lock configuration that you want to apply to the specified bucket.
" + "GetObjectLockConfigurationOutput$ObjectLockConfiguration": "The specified bucket's Object Lock configuration.
", + "PutObjectLockConfigurationRequest$ObjectLockConfiguration": "The Object Lock configuration that you want to apply to the specified bucket.
" } }, "ObjectLockEnabled": { "base": null, "refs": { - "ObjectLockConfiguration$ObjectLockEnabled": "Indicates whether this bucket has an object lock configuration enabled.
" + "ObjectLockConfiguration$ObjectLockEnabled": "Indicates whether this bucket has an Object Lock configuration enabled.
" } }, "ObjectLockEnabledForBucket": { "base": null, "refs": { - "CreateBucketRequest$ObjectLockEnabledForBucket": "Specifies whether you want Amazon S3 object lock to be enabled for the new bucket.
" + "CreateBucketRequest$ObjectLockEnabledForBucket": "Specifies whether you want S3 Object Lock to be enabled for the new bucket.
" } }, "ObjectLockLegalHold": { @@ -2383,30 +2413,30 @@ "refs": { "CopyObjectRequest$ObjectLockLegalHoldStatus": "Specifies whether you want to apply a Legal Hold to the copied object.
", "CreateMultipartUploadRequest$ObjectLockLegalHoldStatus": "Specifies whether you want to apply a Legal Hold to the uploaded object.
", - "GetObjectOutput$ObjectLockLegalHoldStatus": "Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.
", - "HeadObjectOutput$ObjectLockLegalHoldStatus": "The Legal Hold status for the specified object.
", + "GetObjectOutput$ObjectLockLegalHoldStatus": "Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.
", + "HeadObjectOutput$ObjectLockLegalHoldStatus": "Specifies whether a legal hold is in effect for this object. This header is only returned if the requester has the s3:GetObjectLegalHold
permission. This header is not returned if the specified version of this object has never had a legal hold applied. For more information about S3 Object Lock, see Object Lock.
Indicates whether the specified object has a Legal Hold in place.
", - "PutObjectRequest$ObjectLockLegalHoldStatus": "The Legal Hold status that you want to apply to the specified object.
" + "PutObjectRequest$ObjectLockLegalHoldStatus": "Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock.
" } }, "ObjectLockMode": { "base": null, "refs": { - "CopyObjectRequest$ObjectLockMode": "The object lock mode that you want to apply to the copied object.
", - "CreateMultipartUploadRequest$ObjectLockMode": "Specifies the object lock mode that you want to apply to the uploaded object.
", - "GetObjectOutput$ObjectLockMode": "The object lock mode currently in place for this object.
", - "HeadObjectOutput$ObjectLockMode": "The object lock mode currently in place for this object.
", - "PutObjectRequest$ObjectLockMode": "The object lock mode that you want to apply to this object.
" + "CopyObjectRequest$ObjectLockMode": "The Object Lock mode that you want to apply to the copied object.
", + "CreateMultipartUploadRequest$ObjectLockMode": "Specifies the Object Lock mode that you want to apply to the uploaded object.
", + "GetObjectOutput$ObjectLockMode": "The Object Lock mode currently in place for this object.
", + "HeadObjectOutput$ObjectLockMode": "The Object Lock mode, if any, that's in effect for this object. This header is only returned if the requester has the s3:GetObjectRetention
permission. For more information about S3 Object Lock, see Object Lock.
The Object Lock mode that you want to apply to this object.
" } }, "ObjectLockRetainUntilDate": { "base": null, "refs": { - "CopyObjectRequest$ObjectLockRetainUntilDate": "The date and time when you want the copied object's object lock to expire.
", - "CreateMultipartUploadRequest$ObjectLockRetainUntilDate": "Specifies the date and time when you want the object lock to expire.
", - "GetObjectOutput$ObjectLockRetainUntilDate": "The date and time when this object's object lock will expire.
", - "HeadObjectOutput$ObjectLockRetainUntilDate": "The date and time when this object's object lock expires.
", - "PutObjectRequest$ObjectLockRetainUntilDate": "The date and time when you want this object's object lock to expire.
" + "CopyObjectRequest$ObjectLockRetainUntilDate": "The date and time when you want the copied object's Object Lock to expire.
", + "CreateMultipartUploadRequest$ObjectLockRetainUntilDate": "Specifies the date and time when you want the Object Lock to expire.
", + "GetObjectOutput$ObjectLockRetainUntilDate": "The date and time when this object's Object Lock will expire.
", + "HeadObjectOutput$ObjectLockRetainUntilDate": "The date and time when the Object Lock retention period expires. This header is only returned if the requester has the s3:GetObjectRetention
permission.
The date and time when you want this object's Object Lock to expire.
" } }, "ObjectLockRetention": { @@ -2419,21 +2449,21 @@ "ObjectLockRetentionMode": { "base": null, "refs": { - "DefaultRetention$Mode": "The default object lock retention mode you want to apply to new objects placed in the specified bucket.
", + "DefaultRetention$Mode": "The default Object Lock retention mode you want to apply to new objects placed in the specified bucket.
", "ObjectLockRetention$Mode": "Indicates the Retention mode for the specified object.
" } }, "ObjectLockRule": { - "base": "The container element for an object lock rule.
", + "base": "The container element for an Object Lock rule.
", "refs": { - "ObjectLockConfiguration$Rule": "The object lock rule in place for the specified object.
" + "ObjectLockConfiguration$Rule": "The Object Lock rule in place for the specified object.
" } }, "ObjectLockToken": { "base": null, "refs": { - "PutBucketReplicationRequest$Token": "A token that allows Amazon S3 object lock to be enabled for an existing bucket.
", - "PutObjectLockConfigurationRequest$Token": "A token to allow Amazon S3 object lock to be enabled for an existing bucket.
" + "PutBucketReplicationRequest$Token": "", + "PutObjectLockConfigurationRequest$Token": "A token to allow Object Lock to be enabled for an existing bucket.
" } }, "ObjectNotInActiveTierError": { @@ -2448,7 +2478,7 @@ } }, "ObjectVersion": { - "base": "", + "base": "The version of an object.
", "refs": { "ObjectVersionList$member": null } @@ -2456,22 +2486,22 @@ "ObjectVersionId": { "base": null, "refs": { - "CompleteMultipartUploadOutput$VersionId": "Version of the object.
", + "CompleteMultipartUploadOutput$VersionId": "Version ID of the newly created object, in case the bucket has versioning turned on.
", "CopyObjectOutput$VersionId": "Version ID of the newly created copy.
", "DeleteMarkerEntry$VersionId": "Version ID of an object.
", "DeleteObjectOutput$VersionId": "Returns the version ID of the delete marker created as a result of the DELETE operation.
", "DeleteObjectRequest$VersionId": "VersionId used to reference a specific version of the object.
", "DeleteObjectTaggingOutput$VersionId": "The versionId of the object the tag-set was removed from.
", "DeleteObjectTaggingRequest$VersionId": "The versionId of the object that the tag-set will be removed from.
", - "DeletedObject$VersionId": "", - "Error$VersionId": "", + "DeletedObject$VersionId": "The version ID of the deleted object.
", + "Error$VersionId": "The version ID of the error.
", "GetObjectAclRequest$VersionId": "VersionId used to reference a specific version of the object.
", "GetObjectLegalHoldRequest$VersionId": "The version ID of the object whose Legal Hold status you want to retrieve.
", "GetObjectOutput$VersionId": "Version of the object.
", "GetObjectRequest$VersionId": "VersionId used to reference a specific version of the object.
", "GetObjectRetentionRequest$VersionId": "The version ID for the object whose retention settings you want to retrieve.
", - "GetObjectTaggingOutput$VersionId": "", - "GetObjectTaggingRequest$VersionId": "", + "GetObjectTaggingOutput$VersionId": "The versionId of the object for which you got the tagging information.
", + "GetObjectTaggingRequest$VersionId": "The versionId of the object for which to get the tagging information.
", "HeadObjectOutput$VersionId": "Version of the object.
", "HeadObjectRequest$VersionId": "VersionId used to reference a specific version of the object.
", "ObjectIdentifier$VersionId": "VersionId for the specific version of the object to delete.
", @@ -2480,15 +2510,15 @@ "PutObjectLegalHoldRequest$VersionId": "The version ID of the object that you want to place a Legal Hold on.
", "PutObjectOutput$VersionId": "Version of the object.
", "PutObjectRetentionRequest$VersionId": "The version ID for the object that you want to apply this Object Retention configuration to.
", - "PutObjectTaggingOutput$VersionId": "", - "PutObjectTaggingRequest$VersionId": "", - "RestoreObjectRequest$VersionId": "" + "PutObjectTaggingOutput$VersionId": "The versionId of the object the tag-set was added to.
", + "PutObjectTaggingRequest$VersionId": "The versionId of the object that the tag-set will be added to.
", + "RestoreObjectRequest$VersionId": "VersionId used to reference a specific version of the object.
" } }, "ObjectVersionList": { "base": null, "refs": { - "ListObjectVersionsOutput$Versions": "" + "ListObjectVersionsOutput$Versions": "Container for version information.
" } }, "ObjectVersionStorageClass": { @@ -2511,17 +2541,17 @@ } }, "Owner": { - "base": "", + "base": "Container for the owner's display name and ID
", "refs": { "AccessControlPolicy$Owner": "Container for the bucket owner's display name and ID.
", - "DeleteMarkerEntry$Owner": "", - "GetBucketAclOutput$Owner": "", - "GetObjectAclOutput$Owner": "", - "ListBucketsOutput$Owner": "", - "ListPartsOutput$Owner": "", - "MultipartUpload$Owner": "", - "Object$Owner": "", - "ObjectVersion$Owner": "" + "DeleteMarkerEntry$Owner": "The account that created the delete marker.
", + "GetBucketAclOutput$Owner": "Container for the bucket owner's display name and ID.
", + "GetObjectAclOutput$Owner": "Container for the bucket owner's display name and ID.
", + "ListBucketsOutput$Owner": "The owner of the buckets listed.
", + "ListPartsOutput$Owner": "Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID and display name.
", + "MultipartUpload$Owner": "Specifies the owner of the object that is part of the multipart upload.
", + "Object$Owner": "The owner of the object.
", + "ObjectVersion$Owner": "Specifies the Owner of the object.
" } }, "OwnerOverride": { @@ -2531,13 +2561,13 @@ } }, "ParquetInput": { - "base": "", + "base": "Container for Parquet.
", "refs": { "InputSerialization$Parquet": "Specifies Parquet as object's input serialization format.
" } }, "Part": { - "base": "", + "base": "Container for elements related to a part.
", "refs": { "Parts$member": null } @@ -2556,14 +2586,14 @@ "PartNumberMarker": { "base": null, "refs": { - "ListPartsOutput$PartNumberMarker": "Part number after which listing begins.
", + "ListPartsOutput$PartNumberMarker": "When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.
", "ListPartsRequest$PartNumberMarker": "Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.
" } }, "Parts": { "base": null, "refs": { - "ListPartsOutput$Parts": "" + "ListPartsOutput$Parts": "Container for elements related to a particular part. A response can contain zero or more Part elements.
" } }, "PartsCount": { @@ -2605,24 +2635,24 @@ "AnalyticsAndOperator$Prefix": "The prefix to use when evaluating an AND predicate: The prefix that an object must have to be included in the metrics results.
", "AnalyticsFilter$Prefix": "The prefix to use when evaluating an analytics filter.
", "AnalyticsS3BucketDestination$Prefix": "The prefix to use when exporting data. The prefix is prepended to all results.
", - "CommonPrefix$Prefix": "", + "CommonPrefix$Prefix": "Container for the specified common prefix.
", "InventoryFilter$Prefix": "The prefix that an object must have to be included in the inventory results.
", "InventoryS3BucketDestination$Prefix": "The prefix that is prepended to all inventory results.
", "LifecycleRule$Prefix": "Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.
", - "LifecycleRuleAndOperator$Prefix": "", + "LifecycleRuleAndOperator$Prefix": "Prefix identifying one or more objects to which the rule applies.
", "LifecycleRuleFilter$Prefix": "Prefix identifying one or more objects to which the rule applies.
", "ListMultipartUploadsOutput$Prefix": "When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.
", - "ListMultipartUploadsRequest$Prefix": "Lists in-progress uploads only for those keys that begin with the specified prefix.
", - "ListObjectVersionsOutput$Prefix": "", - "ListObjectVersionsRequest$Prefix": "Limits the response to keys that begin with the specified prefix.
", - "ListObjectsOutput$Prefix": "", + "ListMultipartUploadsRequest$Prefix": "Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different grouping of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.)
", + "ListObjectVersionsOutput$Prefix": "Selects objects that start with the value supplied by this parameter.
", + "ListObjectVersionsRequest$Prefix": "Use this parameter to select only those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.) You can use prefix with delimiter to roll up numerous objects into a single result under CommonPrefixes.
", + "ListObjectsOutput$Prefix": "Keys that begin with the indicated prefix.
", "ListObjectsRequest$Prefix": "Limits the response to keys that begin with the specified prefix.
", - "ListObjectsV2Output$Prefix": "Limits the response to keys that begin with the specified prefix.
", + "ListObjectsV2Output$Prefix": "Keys that begin with the indicated prefix.
", "ListObjectsV2Request$Prefix": "Limits the response to keys that begin with the specified prefix.
", "MetricsAndOperator$Prefix": "The prefix used when evaluating an AND predicate.
", "MetricsFilter$Prefix": "The prefix used when evaluating a metrics filter.
", "ReplicationRule$Prefix": "An object keyname prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.
", - "ReplicationRuleAndOperator$Prefix": "", + "ReplicationRuleAndOperator$Prefix": "An object keyname prefix that identifies the subset of objects to which the rule applies.
", "ReplicationRuleFilter$Prefix": "An object keyname prefix that identifies the subset of objects to which the rule applies.
", "Rule$Prefix": "Object key prefix that identifies one or more objects to which this rule applies.
" } @@ -2630,17 +2660,17 @@ "Priority": { "base": null, "refs": { - "ReplicationRule$Priority": "The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:
Same object quality prefix based filter criteria If prefixes you specified in multiple rules overlap
Same object qualify tag based filter criteria specified in multiple rules
For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.
" + "ReplicationRule$Priority": "The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:
Same object quality prefix based filter criteria If prefixes you specified in multiple rules overlap
Same object qualify tag based filter criteria specified in multiple rules
For more information, see Replication in the Amazon S3 Developer Guide.
" } }, "Progress": { - "base": "", + "base": "This data type contains information about progress of an operation.
", "refs": { "ProgressEvent$Details": "The Progress event details.
" } }, "ProgressEvent": { - "base": "", + "base": "This data type contains information about the progress event of an operation.
", "refs": { "SelectObjectContentEventStream$Progress": "The Progress Event.
" } @@ -2653,7 +2683,7 @@ } }, "PublicAccessBlockConfiguration": { - "base": "Specifies the Block Public Access configuration for an Amazon S3 bucket.
", + "base": "The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
", "refs": { "GetPublicAccessBlockOutput$PublicAccessBlockConfiguration": "The PublicAccessBlock
configuration currently in effect for this Amazon S3 bucket.
The PublicAccessBlock
configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type.
", - "QueueConfigurationDeprecated$Queue": "" + "QueueConfigurationDeprecated$Queue": "The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type.
" } }, "QueueConfiguration": { @@ -2828,9 +2858,9 @@ } }, "QueueConfigurationDeprecated": { - "base": "", + "base": "This data type is deprecated. Please use QueueConfiguration for the same purposes. This data type specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.
", "refs": { - "NotificationConfigurationDeprecated$QueueConfiguration": "" + "NotificationConfigurationDeprecated$QueueConfiguration": "This data type is deprecated. This data type specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.
" } }, "QueueConfigurationList": { @@ -2848,21 +2878,21 @@ "QuoteCharacter": { "base": null, "refs": { - "CSVInput$QuoteCharacter": "Value used for escaping where the field delimiter is part of the value.
", - "CSVOutput$QuoteCharacter": "The value used for escaping where the field delimiter is part of the value.
" + "CSVInput$QuoteCharacter": "A single character used for escaping when the field delimiter is part of the value. For example, if the value is a, b
, Amazon S3 wraps this field value in quotation marks, as follows: \" a , b \"
.
Type: String
Default: \"
Ancestors: CSV
A single character used for escaping when the field delimiter is part of the value. For example, if the value is a, b
, Amazon S3 wraps this field value in quotation marks, as follows: \" a , b \"
.
The single character used for escaping the quote character inside an already escaped value.
", - "CSVOutput$QuoteEscapeCharacter": "Th single character used for escaping the quote character inside an already escaped value.
" + "CSVInput$QuoteEscapeCharacter": "A single character used for escaping the quotation mark character inside an already escaped value. For example, the value \"\"\" a , b \"\"\" is parsed as \" a , b \".
", + "CSVOutput$QuoteEscapeCharacter": "The single character used for escaping the quote character inside an already escaped value.
" } }, "QuoteFields": { "base": null, "refs": { - "CSVOutput$QuoteFields": "Indicates whether or not all output fields should be quoted.
" + "CSVOutput$QuoteFields": "Indicates whether to use quotation marks around output fields.
ALWAYS
: Always use quotation marks for output fields.
ASNEEDED
: Use quotation marks for output fields when needed.
The value used to separate individual records.
", - "CSVOutput$RecordDelimiter": "The value used to separate individual records.
", + "CSVInput$RecordDelimiter": "A single character used to separate individual records in the input. Instead of the default value, you can specify an arbitrary delimiter.
", + "CSVOutput$RecordDelimiter": "A single character used to separate individual records in the output. Instead of the default value, you can specify an arbitrary delimiter.
", "JSONOutput$RecordDelimiter": "The value used to separate individual records in the output.
" } }, "RecordsEvent": { - "base": "", + "base": "The container for the records event.
", "refs": { "SelectObjectContentEventStream$Records": "The Records Event.
" } @@ -2895,7 +2925,7 @@ "RedirectAllRequestsTo": { "base": "Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket.
", "refs": { - "GetBucketWebsiteOutput$RedirectAllRequestsTo": "", + "GetBucketWebsiteOutput$RedirectAllRequestsTo": "Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket.
", "WebsiteConfiguration$RedirectAllRequestsTo": "The redirect behavior for every request to this bucket's website endpoint.
If you specify this property, you can't specify any other property.
A container for replication rules. You can add up to 1,000 rules. The maximum size of a replication configuration is 2 MB.
", "refs": { - "GetBucketReplicationOutput$ReplicationConfiguration": "", - "PutBucketReplicationRequest$ReplicationConfiguration": "" + "GetBucketReplicationOutput$ReplicationConfiguration": null, + "PutBucketReplicationRequest$ReplicationConfiguration": null } }, "ReplicationRule": { @@ -2931,7 +2961,7 @@ } }, "ReplicationRuleAndOperator": { - "base": "", + "base": "A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter.
For example:
If you specify both a Prefix and a Tag filter, wrap these filters in an And tag.
If you specify a filter based on multiple tags, wrap the Tag elements in an And tag
A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. For example:
If you specify both a Prefix
and a Tag
filter, wrap these filters in an And
tag.
If you specify a filter based on multiple tags, wrap the Tag
elements in an And
tag.
A filter that identifies the subset of objects to which the replication rule applies. A Filter
must specify exactly one Prefix
, Tag
, or an And
child element.
Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule.
", + "HeadObjectOutput$ReplicationStatus": "Amazon S3 can return this header if your request involves a bucket that is either a source or destination in a replication rule.
In replication you have a source bucket on which you configure replication and destination bucket where Amazon S3 stores object replicas. When you request an object (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will return the x-amz-replication-status header in the response as follows:
If requesting object from the source bucket — Amazon S3 will return the x-amz-replication-status header if object in your request is eligible for replication.
For example, suppose in your replication configuration you specify object prefix \"TaxDocs\" requesting Amazon S3 to replicate objects with key prefix \"TaxDocs\". Then any objects you upload with this key name prefix, for example \"TaxDocs/document1.pdf\", is eligible for replication. For any object request with this key name prefix Amazon S3 will return the x-amz-replication-status header with value PENDING, COMPLETED or FAILED indicating object replication status.
If requesting object from the destination bucket — Amazon S3 will return the x-amz-replication-status header with value REPLICA if object in your request is a replica that Amazon S3 created.
For more information, see Replication.
" + } + }, + "ReplicationTime": { + "base": " A container specifying the time when all objects and operations on objects are replicated. Must be specified together with a Metrics
block.
A container specifying the time when all objects and operations on objects are replicated. Must be specified together with a Metrics
block.
Specifies whether the replication time is enabled.
" + } + }, + "ReplicationTimeValue": { + "base": "A container specifying the time value.
", + "refs": { + "Metrics$EventThreshold": " A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold
event.
A container specifying the time by which replication should complete for all objects and operations on objects.
" } }, "RequestCharged": { @@ -3014,13 +3063,13 @@ } }, "RequestPaymentConfiguration": { - "base": "", + "base": "Container for Payer.
", "refs": { - "PutBucketRequestPaymentRequest$RequestPaymentConfiguration": "" + "PutBucketRequestPaymentRequest$RequestPaymentConfiguration": "Container for Payer.
" } }, "RequestProgress": { - "base": "", + "base": "Container for specifiying if periodic QueryProgress messages should be sent.
", "refs": { "SelectObjectContentRequest$RequestProgress": "Specifies if periodic request progress information should be enabled.
" } @@ -3065,7 +3114,7 @@ "base": null, "refs": { "GetObjectOutput$Restore": "Provides information about object restoration operation and expiration time of the restored object copy.
", - "HeadObjectOutput$Restore": "Provides information about object restoration operation and expiration time of the restored object copy.
" + "HeadObjectOutput$Restore": "If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject or an archive copy is already restored.
If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example:
x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 23 Dec 2012 00:00:00 GMT\"
If the object restoration is in progress, the header returns the value ongoing-request=\"true\"
.
For more information about archiving objects, see Transitioning Objects: General Considerations.
" } }, "RestoreObjectOutput": { @@ -3087,7 +3136,7 @@ "RestoreRequest": { "base": "Container for restore job parameters.
", "refs": { - "RestoreObjectRequest$RestoreRequest": "" + "RestoreObjectRequest$RestoreRequest": null } }, "RestoreRequestType": { @@ -3099,7 +3148,7 @@ "Role": { "base": null, "refs": { - "ReplicationConfiguration$Role": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects. For more information, see How to Set Up Cross-Region Replication in the Amazon Simple Storage Service Developer Guide.
" + "ReplicationConfiguration$Role": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects. For more information, see How to Set Up Replication in the Amazon Simple Storage Service Developer Guide.
" } }, "RoutingRule": { @@ -3111,7 +3160,7 @@ "RoutingRules": { "base": null, "refs": { - "GetBucketWebsiteOutput$RoutingRules": "", + "GetBucketWebsiteOutput$RoutingRules": "Rules that define when a redirect is applied and the redirect behavior.
", "WebsiteConfiguration$RoutingRules": "Rules that define when a redirect is applied and the redirect behavior.
" } }, @@ -3124,14 +3173,14 @@ "Rules": { "base": null, "refs": { - "GetBucketLifecycleOutput$Rules": "", - "LifecycleConfiguration$Rules": "" + "GetBucketLifecycleOutput$Rules": "Container for a lifecycle rule.
", + "LifecycleConfiguration$Rules": "Specifies lifecycle configuration rules for an Amazon S3 bucket.
" } }, "S3KeyFilter": { "base": "A container for object key name prefix and suffix filtering rules.
", "refs": { - "NotificationConfigurationFilter$Key": "" + "NotificationConfigurationFilter$Key": null } }, "S3Location": { @@ -3213,20 +3262,20 @@ "SSEKMSKeyId": { "base": null, "refs": { - "CompleteMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", - "CopyObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", + "CompleteMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", + "CopyObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", "CopyObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
", - "CreateMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", + "CreateMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
", "Encryption$KMSKeyId": "If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.
", - "GetObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", - "HeadObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", - "PutObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", - "PutObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
", - "SSEKMS$KeyId": "Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.
", + "GetObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", + "HeadObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", + "PutObjectOutput$SSEKMSKeyId": "If the x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", + "PutObjectRequest$SSEKMSKeyId": "If the x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.
If the value of x-amz-server-side-encryption is aws:kms, this header specifies the ID of the AWS KMS CMK that will be used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS to protect the data.
", + "SSEKMS$KeyId": "Specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) to use for encrypting Inventory reports.
", "ServerSideEncryptionByDefault$KMSMasterKeyID": "KMS master key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm
is set to aws:kms
.
If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", - "UploadPartOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
" + "UploadPartCopyOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.
", + "UploadPartOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) was used for the object.
" } }, "SSES3": { @@ -3238,13 +3287,13 @@ "ScanRange": { "base": null, "refs": { - "SelectObjectContentRequest$ScanRange": "Specifies the byte range of the object to get the records from. A record is processed when its first byte is contained by the range. This parameter is optional, but when specified, it must not be empty. See RFC 2616, Section 14.35.1 about how to specify the start and end of the range.
" + "SelectObjectContentRequest$ScanRange": "Specifies the byte range of the object to get the records from. A record is processed when its first byte is contained by the range. This parameter is optional, but when specified, it must not be empty. See RFC 2616, Section 14.35.1 about how to specify the start and end of the range.
ScanRange
may be used in the following ways:
<scanrange><start>50</start><end>100</end></scanrange>
- process only the records starting between the bytes 50 and 100 (inclusive, counting from zero)
<scanrange><start>50</start></scanrange>
- process only the records starting after the byte 50
<scanrange><end>50</end></scanrange>
- process only the records within the last 50 bytes of the file.
The container for selecting objects from a content event stream.
", "refs": { - "SelectObjectContentOutput$Payload": "" + "SelectObjectContentOutput$Payload": "The array of results.
" } }, "SelectObjectContentOutput": { @@ -3266,15 +3315,15 @@ "ServerSideEncryption": { "base": null, "refs": { - "CompleteMultipartUploadOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", + "CompleteMultipartUploadOutput$ServerSideEncryption": "If you specified server-side encryption either with an Amazon S3-managed encryption key or an AWS KMS customer master key (CMK) in your initiate multipart upload request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.
", "CopyObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "CopyObjectRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "CreateMultipartUploadOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "CreateMultipartUploadRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "Encryption$EncryptionType": "The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).
", "GetObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", - "HeadObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", - "PutObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", + "HeadObjectOutput$ServerSideEncryption": "If the object is stored using server-side encryption either with an AWS KMS customer master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with the value of the Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", + "PutObjectOutput$ServerSideEncryption": "If you specified server-side encryption either with an AWS KMS customer master key (CMK) or Amazon S3-managed encryption key in your PUT request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.
", "PutObjectRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "ServerSideEncryptionByDefault$SSEAlgorithm": "Server-side encryption algorithm to use for the default encryption.
", "UploadPartCopyOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", @@ -3290,8 +3339,8 @@ "ServerSideEncryptionConfiguration": { "base": "Specifies the default server-side-encryption configuration.
", "refs": { - "GetBucketEncryptionOutput$ServerSideEncryptionConfiguration": "", - "PutBucketEncryptionRequest$ServerSideEncryptionConfiguration": "" + "GetBucketEncryptionOutput$ServerSideEncryptionConfiguration": null, + "PutBucketEncryptionRequest$ServerSideEncryptionConfiguration": null } }, "ServerSideEncryptionRule": { @@ -3309,7 +3358,7 @@ "Setting": { "base": null, "refs": { - "PublicAccessBlockConfiguration$BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE
causes the following behavior:
PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.
PUT Object calls fail if the request includes a public ACL.
Enabling this setting doesn't affect existing policies or ACLs.
", + "PublicAccessBlockConfiguration$BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE
causes the following behavior:
PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.
PUT Object calls fail if the request includes a public ACL.
PUT Bucket calls fail if the request includes a public ACL.
Enabling this setting doesn't affect existing policies or ACLs.
", "PublicAccessBlockConfiguration$IgnorePublicAcls": "Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE
causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.
Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.
", "PublicAccessBlockConfiguration$BlockPublicPolicy": "Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE
causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.
Enabling this setting doesn't affect existing bucket policies.
", "PublicAccessBlockConfiguration$RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE
restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.
Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
" @@ -3318,15 +3367,15 @@ "Size": { "base": null, "refs": { - "Object$Size": "", + "Object$Size": "Size in bytes of the object
", "ObjectVersion$Size": "Size in bytes of the object.
", "Part$Size": "Size in bytes of the uploaded part data.
" } }, "SourceSelectionCriteria": { - "base": "A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).
", + "base": "A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).
", "refs": { - "ReplicationRule$SourceSelectionCriteria": "A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).
" + "ReplicationRule$SourceSelectionCriteria": "A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).
" } }, "SseKmsEncryptedObjects": { @@ -3338,30 +3387,30 @@ "SseKmsEncryptedObjectsStatus": { "base": null, "refs": { - "SseKmsEncryptedObjects$Status": "Specifies whether Amazon S3 replicates objects created with server-side encryption using an AWS KMS-managed key.
" + "SseKmsEncryptedObjects$Status": "Specifies whether Amazon S3 replicates objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service.
" } }, "Start": { "base": null, "refs": { - "ScanRange$Start": "Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0.
" + "ScanRange$Start": "Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0. If only start is supplied, it means scan from that point to the end of the file.For example; <scanrange><start>50</start></scanrange>
means scan from byte 50 until the end of the file.
StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket
", - "ListObjectsV2Request$StartAfter": "StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket
" + "ListObjectsV2Output$StartAfter": "If StartAfter was sent with the request, it is included in the response.
", + "ListObjectsV2Request$StartAfter": "StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket.
" } }, "Stats": { - "base": "", + "base": "Container for the stats details.
", "refs": { "StatsEvent$Details": "The Stats event details.
" } }, "StatsEvent": { - "base": "", + "base": "Container for the Stats Event.
", "refs": { "SelectObjectContentEventStream$Stats": "The Stats Event.
" } @@ -3372,11 +3421,11 @@ "CopyObjectRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.
", "CreateMultipartUploadRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.
", "Destination$StorageClass": "The storage class to use when replicating objects, such as standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.
For valid values, see the StorageClass
element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.
The class of storage used to store the object.
", + "GetObjectOutput$StorageClass": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for Standard storage class objects.
", + "HeadObjectOutput$StorageClass": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for Standard storage class objects.
For more information, see Storage Classes.
", + "ListPartsOutput$StorageClass": "Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object.
", "MultipartUpload$StorageClass": "The class of storage used to store the object.
", - "PutObjectRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.
", + "PutObjectRequest$StorageClass": "If you don't specify, Standard is the default storage class. Amazon S3 supports other storage classes.
", "S3Location$StorageClass": "The class of storage used to store the restore results.
" } }, @@ -3387,7 +3436,7 @@ } }, "StorageClassAnalysisDataExport": { - "base": "", + "base": "Container for data related to the storage class analysis for an Amazon S3 bucket for export.
", "refs": { "StorageClassAnalysis$DataExport": "Specifies how data related to the storage class analysis for an Amazon S3 bucket should be exported.
" } @@ -3405,7 +3454,7 @@ } }, "Tag": { - "base": "", + "base": "A container of a key value name pair.
", "refs": { "AnalyticsFilter$Tag": "The tag to use when evaluating an analytics filter.
", "LifecycleRuleFilter$Tag": "This tag must exist in the object's tag set in order for the rule to apply.
", @@ -3424,19 +3473,19 @@ "base": null, "refs": { "AnalyticsAndOperator$Tags": "The list of tags to use when evaluating an AND predicate.
", - "GetBucketTaggingOutput$TagSet": "", - "GetObjectTaggingOutput$TagSet": "", + "GetBucketTaggingOutput$TagSet": "Contains the tag set.
", + "GetObjectTaggingOutput$TagSet": "Contains the tag set.
", "LifecycleRuleAndOperator$Tags": "All of these tags must exist in the object's tag set in order for the rule to apply.
", "MetricsAndOperator$Tags": "The list of tags used when evaluating an AND predicate.
", - "ReplicationRuleAndOperator$Tags": "", - "Tagging$TagSet": "" + "ReplicationRuleAndOperator$Tags": "An array of tags containing key and value pairs.
", + "Tagging$TagSet": "A collection for a a set of tags
" } }, "Tagging": { - "base": "", + "base": "Container for TagSet elements.
", "refs": { - "PutBucketTaggingRequest$Tagging": "", - "PutObjectTaggingRequest$Tagging": "", + "PutBucketTaggingRequest$Tagging": "Container for the TagSet and Tag elements.
", + "PutObjectTaggingRequest$Tagging": "Container for the TagSet and Tag elements
", "S3Location$Tagging": "The tag-set that is applied to the restore results.
" } }, @@ -3461,7 +3510,7 @@ } }, "TargetGrant": { - "base": "", + "base": "Container for granting information.
", "refs": { "TargetGrants$member": null } @@ -3469,7 +3518,7 @@ "TargetGrants": { "base": null, "refs": { - "LoggingEnabled$TargetGrants": "" + "LoggingEnabled$TargetGrants": "Container for granting information.
" } }, "TargetPrefix": { @@ -3488,14 +3537,14 @@ "Token": { "base": null, "refs": { - "ListBucketAnalyticsConfigurationsOutput$ContinuationToken": "The ContinuationToken that represents where this request began.
", + "ListBucketAnalyticsConfigurationsOutput$ContinuationToken": "The marker that is used as a starting point for this analytics configuration list response. This value is present if it was sent in the request.
", "ListBucketAnalyticsConfigurationsRequest$ContinuationToken": "The ContinuationToken that represents a placeholder from where this request should begin.
", "ListBucketInventoryConfigurationsOutput$ContinuationToken": "If sent in the request, the marker that is used as a starting point for this inventory configuration list response.
", "ListBucketInventoryConfigurationsRequest$ContinuationToken": "The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.
", "ListBucketMetricsConfigurationsOutput$ContinuationToken": "The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.
", "ListBucketMetricsConfigurationsRequest$ContinuationToken": "The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.
", - "ListObjectsV2Output$ContinuationToken": "ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key
", - "ListObjectsV2Request$ContinuationToken": "ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key
" + "ListObjectsV2Output$ContinuationToken": "If ContinuationToken was sent with the request, it is included in the response.
", + "ListObjectsV2Request$ContinuationToken": "ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key.
" } }, "TopicArn": { @@ -3512,9 +3561,9 @@ } }, "TopicConfigurationDeprecated": { - "base": "", + "base": "A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events. This data type is deperecated. Please use TopicConfiguration instead.
", "refs": { - "NotificationConfigurationDeprecated$TopicConfiguration": "" + "NotificationConfigurationDeprecated$TopicConfiguration": "This data type is deperecated. A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events.
" } }, "TopicConfigurationList": { @@ -3526,14 +3575,14 @@ "Transition": { "base": "Specifies when an object transitions to a specified storage class.
", "refs": { - "Rule$Transition": "", + "Rule$Transition": "Specifies when an object transitions to a specified storage class.
", "TransitionList$member": null } }, "TransitionList": { "base": null, "refs": { - "LifecycleRule$Transitions": "" + "LifecycleRule$Transitions": "Specifies when an Amazon S3 object transitions to a specified storage class.
" } }, "TransitionStorageClass": { @@ -3559,7 +3608,7 @@ "base": null, "refs": { "ListMultipartUploadsOutput$UploadIdMarker": "Upload ID after which listing began.
", - "ListMultipartUploadsRequest$UploadIdMarker": "Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.
" + "ListMultipartUploadsRequest$UploadIdMarker": "Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be included in the list only if they have an upload ID lexicographically greater than the specified upload-id-marker.
" } }, "UploadPartCopyOutput": { @@ -3597,20 +3646,20 @@ "VersionIdMarker": { "base": null, "refs": { - "ListObjectVersionsOutput$VersionIdMarker": "", + "ListObjectVersionsOutput$VersionIdMarker": "Marks the last version of the Key returned in a truncated response.
", "ListObjectVersionsRequest$VersionIdMarker": "Specifies the object version you want to start listing from.
" } }, "VersioningConfiguration": { "base": "Describes the versioning state of an Amazon S3 bucket. For more information, see PUT Bucket versioning in the Amazon Simple Storage Service API Reference.
", "refs": { - "PutBucketVersioningRequest$VersioningConfiguration": "" + "PutBucketVersioningRequest$VersioningConfiguration": "Container for setting the versioning state.
" } }, "WebsiteConfiguration": { "base": "Specifies website configuration parameters for an Amazon S3 bucket.
", "refs": { - "PutBucketWebsiteRequest$WebsiteConfiguration": "" + "PutBucketWebsiteRequest$WebsiteConfiguration": "Container for the request.
" } }, "WebsiteRedirectLocation": { @@ -3620,7 +3669,7 @@ "CreateMultipartUploadRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.
", "GetObjectOutput$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.
", "HeadObjectOutput$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.
", - "PutObjectRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.
" + "PutObjectRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see .
In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:
x-amz-website-redirect-location: /anotherPage.html
In the following example, the request header sets the object redirect to another website:
x-amz-website-redirect-location: http://www.example.com/
For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects.
" } }, "Years": { diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 0732c2fba95..9a43ef6daba 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -84,10 +84,13 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket.s3.amazonaws.com/" }, "comments": { "input": { @@ -95,19 +98,16 @@ "output": { } }, - "description": "The following example creates a bucket.", - "id": "to-create-a-bucket--1472851826060", - "title": "To create a bucket " + "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.", + "id": "to-create-a-bucket-in-a-specific-region-1483399072992", + "title": "To create a bucket in a specific region" }, { "input": { - "Bucket": "examplebucket", - "CreateBucketConfiguration": { - "LocationConstraint": "eu-west-1" - } + "Bucket": "examplebucket" }, "output": { - "Location": "http://examplebucket.s3.amazonaws.com/" + "Location": "/examplebucket" }, "comments": { "input": { @@ -115,9 +115,9 @@ "output": { } }, - "description": "The following example creates a bucket. 
The request specifies an AWS region where to create the bucket.", - "id": "to-create-a-bucket-in-a-specific-region-1483399072992", - "title": "To create a bucket in a specific region" + "description": "The following example creates a bucket.", + "id": "to-create-a-bucket--1472851826060", + "title": "To create a bucket " } ], "CreateMultipartUpload": [ @@ -334,10 +334,12 @@ "Delete": { "Objects": [ { - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" }, { - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" } ], "Quiet": false @@ -346,14 +348,12 @@ "output": { "Deleted": [ { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" }, { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" } ] }, @@ -363,9 +363,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", - "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", - "title": "To delete multiple objects from a versioned bucket" + "description": "The following example deletes objects from a bucket. The request specifies object versions. 
S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", + "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", + "title": "To delete multiple object versions from a versioned bucket" }, { "input": { @@ -373,12 +373,10 @@ "Delete": { "Objects": [ { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "Key": "objectkey2" } ], "Quiet": false @@ -387,12 +385,14 @@ "output": { "Deleted": [ { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", + "Key": "objectkey2" } ] }, @@ -402,9 +402,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", - "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", - "title": "To delete multiple object versions from a versioned bucket" + "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. 
In this case, all versions remain in the bucket and S3 adds a delete marker.", + "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", + "title": "To delete multiple objects from a versioned bucket" } ], "GetBucketCors": [ @@ -989,37 +989,47 @@ "ListMultipartUploads": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "KeyMarker": "nextkeyfrompreviousresponse", + "MaxUploads": "2", + "UploadIdMarker": "valuefrompreviousresponse" }, "output": { + "Bucket": "acl1", + "IsTruncated": true, + "KeyMarker": "", + "MaxUploads": "2", + "NextKeyMarker": "someobjectkey", + "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", + "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", - "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "mohanataws", + "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": 
"b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1029,53 +1039,43 @@ "output": { } }, - "description": "The following example lists in-progress multipart uploads on a specific bucket.", - "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", - "title": "To list in-progress multipart uploads on a bucket" + "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", + "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", + "title": "List next set of multipart uploads when previous result is truncated" }, { "input": { - "Bucket": "examplebucket", - "KeyMarker": "nextkeyfrompreviousresponse", - "MaxUploads": "2", - "UploadIdMarker": "valuefrompreviousresponse" + "Bucket": "examplebucket" }, "output": { - "Bucket": "acl1", - "IsTruncated": true, - "KeyMarker": "", - "MaxUploads": "2", - "NextKeyMarker": "someobjectkey", - "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", - "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "mohanataws", - "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "display-name", + "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + 
"DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1085,9 +1085,9 @@ "output": { } }, - "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", - "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", - "title": "List next set of multipart uploads when previous result is truncated" + "description": "The following example lists in-progress multipart uploads on a specific bucket.", + "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", + "title": "To list in-progress multipart uploads on a bucket" } ], "ListObjectVersions": [ @@ -1567,14 +1567,13 @@ "PutObject": [ { "input": { - "ACL": "authenticated-read", "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "exampleobject" + "Key": "objectkey" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" + "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" }, "comments": { "input": { @@ -1582,19 +1581,20 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", - "title": "To upload an object and specify canned ACL." 
+ "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-create-an-object-1483147613675", + "title": "To create an object." }, { "input": { - "Body": "HappyFace.jpg", + "Body": "c:\\HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "HappyFace.jpg", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" }, "comments": { "input": { @@ -1602,19 +1602,22 @@ "output": { } }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" + "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", + "title": "To upload an object and specify optional tags" }, { "input": { "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "objectkey" + "Key": "exampleobject", + "ServerSideEncryption": "AES256", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" + "ServerSideEncryption": "AES256", + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" }, "comments": { "input": { @@ -1622,23 +1625,19 @@ "output": { } }, - "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-create-an-object-1483147613675", - "title": "To create an object." + "description": "The following example uploads and object. 
The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", + "title": "To upload an object and specify server-side encryption and object tags" }, { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "Metadata": { - "metadata1": "value1", - "metadata2": "value2" - } + "Key": "HappyFace.jpg" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" }, "comments": { "input": { @@ -1646,20 +1645,20 @@ "output": { } }, - "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", - "title": "To upload object and specify user-defined metadata" + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" }, { "input": { - "Body": "c:\\HappyFace.jpg", + "ACL": "authenticated-read", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "Tagging": "key1=value1&key2=value2" + "Key": "exampleobject" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" + "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" }, "comments": { "input": { @@ -1667,22 +1666,22 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional object tags. 
The bucket is versioned, therefore S3 returns version ID of the newly created object.", - "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", - "title": "To upload an object and specify optional tags" + "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", + "title": "To upload an object and specify canned ACL." }, { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", + "Key": "HappyFace.jpg", "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" + "StorageClass": "STANDARD_IA" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", "ServerSideEncryption": "AES256", - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" + "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" }, "comments": { "input": { @@ -1690,22 +1689,23 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", - "title": "To upload an object and specify server-side encryption and object tags" + "description": "The following example uploads an object. 
The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", + "id": "to-upload-an-object-(specify-optional-headers)", + "title": "To upload an object (specify optional headers)" }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "ServerSideEncryption": "AES256", - "StorageClass": "STANDARD_IA" + "Key": "exampleobject", + "Metadata": { + "metadata1": "value1", + "metadata2": "value2" + } }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" + "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" }, "comments": { "input": { @@ -1713,9 +1713,9 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", - "id": "to-upload-an-object-(specify-optional-headers)", - "title": "To upload an object (specify optional headers)" + "description": "The following example creates an object. The request also specifies optional metadata. 
If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", + "title": "To upload object and specify user-defined metadata" } ], "PutObjectAcl": [ diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 836675ad0fa..8918827c607 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -1195,6 +1195,7 @@ "members":{ "ContainerHostname":{"shape":"ContainerHostname"}, "Image":{"shape":"Image"}, + "Mode":{"shape":"ContainerMode"}, "ModelDataUrl":{"shape":"Url"}, "Environment":{"shape":"EnvironmentMap"}, "ModelPackageName":{"shape":"ArnOrName"} @@ -1210,6 +1211,13 @@ "max":63, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, + "ContainerMode":{ + "type":"string", + "enum":[ + "SingleModel", + "MultiModel" + ] + }, "ContentClassifier":{ "type":"string", "enum":[ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 1f560a01c5b..a5214fb2135 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -16,7 +16,7 @@ "CreateNotebookInstanceLifecycleConfig": "Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH
environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances
in log stream [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", "CreatePresignedNotebookInstanceUrl": "Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open
next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.
IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.For example, you can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress
condition operator and the aws:SourceIP
condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.
The URL that you get from a call to is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.
Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.
If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.
In the request body, you provide the following:
AlgorithmSpecification
- Identifies the training algorithm to use.
HyperParameters
- Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.
InputDataConfig
- Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.
OutputDataConfig
- Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.
ResourceConfig
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.
EnableManagedSpotTraining
- Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.
RoleARN
- The Amazon Resource Number (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.
StoppingCondition
- To help cap training costs, use MaxRuntimeInSeconds
to set a time limit for training. Use MaxWaitTimeInSeconds
to specify how long you are willing to to wait for a managed spot training job to complete.
For more information about Amazon SageMaker, see How It Works.
", - "CreateTransformJob": "Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.
To perform batch transformations, you create a transform job and use the data that you have readily available.
In the request body, you provide the following:
TransformJobName
- Identifies the transform job. The name must be unique within an AWS Region in an AWS account.
ModelName
- Identifies the model to use. ModelName
must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.
TransformInput
- Describes the dataset to be transformed and the Amazon S3 location where it is stored.
TransformOutput
- Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.
TransformResources
- Identifies the ML compute instances for the transform job.
For more information about how batch transformation works Amazon SageMaker, see How It Works.
", + "CreateTransformJob": "Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.
To perform batch transformations, you create a transform job and use the data that you have readily available.
In the request body, you provide the following:
TransformJobName
- Identifies the transform job. The name must be unique within an AWS Region in an AWS account.
ModelName
- Identifies the model to use. ModelName
must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.
TransformInput
- Describes the dataset to be transformed and the Amazon S3 location where it is stored.
TransformOutput
- Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.
TransformResources
- Identifies the ML compute instances for the transform job.
For more information about how batch transformation works, see Batch Transform.
", "CreateWorkteam": "Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.
You cannot create more than 25 work teams in an account and region.
", "DeleteAlgorithm": "Removes the specified algorithm from your account.
", "DeleteCodeRepository": "Deletes the specified Git repository from your account.
", @@ -234,7 +234,7 @@ "BatchStrategy": { "base": null, "refs": { - "CreateTransformJobRequest$BatchStrategy": "Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
To enable the batch strategy, you must set SplitType
to Line
, RecordIO
, or TFRecord
.
To use only one record when making an HTTP invocation request to a container, set BatchStrategy
to SingleRecord
and SplitType
to Line
.
To fit as many records in a mini-batch as can fit within the MaxPayloadInMB
limit, set BatchStrategy
to MultiRecord
and SplitType
to Line
.
Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
To enable the batch strategy, you must set the SplitType
property of the DataProcessing object to Line
, RecordIO
, or TFRecord
.
To use only one record when making an HTTP invocation request to a container, set BatchStrategy
to SingleRecord
and SplitType
to Line
.
To fit as many records in a mini-batch as can fit within the MaxPayloadInMB
limit, set BatchStrategy
to MultiRecord
and SplitType
to Line
.
Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
To enable the batch strategy, you must set SplitType
to Line
, RecordIO
, or TFRecord
.
A string that determines the number of records included in a single mini-batch.
SingleRecord
means only one record is used per mini-batch. MultiRecord
means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB
limit.
The DNS host name for the Docker container.
" } }, + "ContainerMode": { + "base": null, + "refs": { + "ContainerDefinition$Mode": "Specifies whether the container hosts a single model or multiple models.
" + } + }, "ContentClassifier": { "base": null, "refs": { @@ -1224,7 +1230,7 @@ } }, "Filter": { - "base": "A conditional statement for a search expression that includes a Boolean operator, a resource property, and a value.
If you don't specify an Operator
and a Value
, the filter searches for only the specified property. For example, defining a Filter
for the FailureReason
for the TrainingJob
Resource
searches for training job objects that have a value in the FailureReason
field.
If you specify a Value
, but not an Operator
, Amazon SageMaker uses the equals operator as the default.
In search, there are several property types:
To define a metric filter, enter a value using the form \"Metrics.<name>\"
, where <name>
is a metric name. For example, the following filter searches for training jobs with an \"accuracy\"
metric greater than \"0.9\"
:
{
\"Name\": \"Metrics.accuracy\",
\"Operator\": \"GREATER_THAN\",
\"Value\": \"0.9\"
}
To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\"
. Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value
is also a decimal value. If the specified Value
is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\"
hyperparameter that is less than \"0.5\"
:
{
\"Name\": \"HyperParameters.learning_rate\",
\"Operator\": \"LESS_THAN\",
\"Value\": \"0.5\"
}
To define a tag filter, enter a value with the form \"Tags.<key>\"
.
A conditional statement for a search expression that includes a resource property, a Boolean operator, and a value.
If you don't specify an Operator
and a Value
, the filter searches for only the specified property. For example, defining a Filter
for the FailureReason
for the TrainingJob
Resource
searches for training job objects that have a value in the FailureReason
field.
If you specify a Value
, but not an Operator
, Amazon SageMaker uses the equals operator as the default.
In search, there are several property types:
To define a metric filter, enter a value using the form \"Metrics.<name>\"
, where <name>
is a metric name. For example, the following filter searches for training jobs with an \"accuracy\"
metric greater than \"0.9\"
:
{
\"Name\": \"Metrics.accuracy\",
\"Operator\": \"GREATER_THAN\",
\"Value\": \"0.9\"
}
To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\"
. Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value
is also a decimal value. If the specified Value
is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\"
hyperparameter that is less than \"0.5\"
:
{
\"Name\": \"HyperParameters.learning_rate\",
\"Operator\": \"LESS_THAN\",
\"Value\": \"0.5\"
}
To define a tag filter, enter a value with the form \"Tags.<key>\"
.
The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.
Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.
Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.
Logarithmic scaling works only for ranges that have only values greater than 0.
Hyperparemeter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.
Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.
The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.
Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.
Hyperparemeter tuning searches the values in the hyperparameter range by using a logarithmic scale.
Logarithmic scaling works only for ranges that have only values greater than 0.
The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.
Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.
Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.
Logarithmic scaling works only for ranges that have only values greater than 0.
Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.
Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.
The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.
Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.
Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.
Logarithmic scaling works only for ranges that have only values greater than 0.
Specifies the source of the data to join with the transformed data. The valid values are None
and Input
The default value is None
which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource
to Input
.
For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput
. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, and the input data is stored under the SageMakerInput
key and the results are stored in SageMakerOutput
.
For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.
" + "DataProcessing$JoinSource": "Specifies the source of the data to join with the transformed data. The valid values are None
and Input
. The default value is None
, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource
to Input
.
For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput
. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, and the input data is stored under the SageMakerInput
key and the results are stored in SageMakerOutput
.
For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.
" } }, "JsonPath": { @@ -1562,15 +1568,15 @@ "KmsKeyId": { "base": null, "refs": { - "CreateEndpointConfigInput$KmsKeyId": "The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
Nitro-based instances do not support encryption with AWS KMS. If any of the models that you specify in the ProductionVariants
parameter use nitro-based instances, do not specify a value for the KmsKeyId
parameter. If you specify a value for KmsKeyId
when using any nitro-based instances, the call to CreateEndpointConfig
fails.
For a list of nitro-based instances, see Nitro-based Instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.
For more information about storage volumes on nitro-based instances, see Amazon EBS and NVMe on Linux Instances.
The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId
when using an instance type with local storage. If any of the models that you specify in the ProductionVariants
parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId
parameter. If you specify a value for KmsKeyId
when using any nitro-based instances with local storage, the call to CreateEndpointConfig
fails.
For a list of instance types that support local instance storage, see Instance Store Volumes.
For more information about local instance storage encryption, see SSD Instance Store Volumes.
The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.
", "DescribeEndpointConfigOutput$KmsKeyId": "AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.
", "DescribeNotebookInstanceOutput$KmsKeyId": "The AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.
", "LabelingJobOutputConfig$KmsKeyId": "The AWS Key Management Service ID of the key used to encrypt the output data, if any.
If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt
. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for LabelingJobOutputConfig
. If you use a bucket policy with an s3:PutObject
permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.
The KMS key policy must grant permission to the IAM role that you specify in your CreateLabelingJob
request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.
The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
// KMS Key Alias
\"alias/ExampleAlias\"
// Amazon Resource Name (ARN) of a KMS Key Alias
\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt
. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket policy with an s3:PutObject
permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.
The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob
, CreateTransformJob
, or CreateHyperParameterTuningJob
requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.
The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
// KMS Key Alias
\"alias/ExampleAlias\"
// Amazon Resource Name (ARN) of a KMS Key Alias
\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.
The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob
request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.
The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job.
Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId
when using an instance type with local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
For more information about local instance storage encryption, see SSD Instance Store Volumes.
The VolumeKmsKeyId
can be in any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
// KMS Key Alias
\"alias/ExampleAlias\"
// Amazon Resource Name (ARN) of a KMS Key Alias
\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.
The KMS key policy must grant permission to the IAM role that you specify in your CreateModel request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.
", "TransformResources$VolumeKmsKeyId": "The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
A set of conditions for stopping a labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.
", + "base": "A set of conditions for stopping a labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.
Labeling jobs fail after 30 days with an appropriate client error message.
A set of conditions for stopping the labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.
", "DescribeLabelingJobResponse$StoppingConditions": "A set of conditions for stopping a labeling job. If any of the conditions are met, the job is automatically stopped.
" @@ -2512,7 +2518,7 @@ "Operator": { "base": null, "refs": { - "Filter$Operator": "A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:
The specified resource in Name
equals the specified Value
.
The specified resource in Name
does not equal the specified Value
.
The specified resource in Name
is greater than the specified Value
. Not supported for text-based properties.
The specified resource in Name
is greater than or equal to the specified Value
. Not supported for text-based properties.
The specified resource in Name
is less than the specified Value
. Not supported for text-based properties.
The specified resource in Name
is less than or equal to the specified Value
. Not supported for text-based properties.
Only supported for text-based properties. The word-list of the property contains the specified Value
.
If you have specified a filter Value
, the default is Equals
.
A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:
The specified resource in Name
equals the specified Value
.
The specified resource in Name
does not equal the specified Value
.
The specified resource in Name
is greater than the specified Value
. Not supported for text-based properties.
The specified resource in Name
is greater than or equal to the specified Value
. Not supported for text-based properties.
The specified resource in Name
is less than the specified Value
. Not supported for text-based properties.
The specified resource in Name
is less than or equal to the specified Value
. Not supported for text-based properties.
Only supported for text-based properties. The word-list of the property contains the specified Value
. A SearchExpression
can include only one Contains
operator.
If you have specified a filter Value
, the default is Equals
.
The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz
.
Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.
", "OutputDataConfig$S3OutputPath": "Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix
.
Depending on the value specified for the S3DataType
, identifies either a key name prefix or a manifest. For example:
A key name prefix might look like this: s3://bucketname/exampleprefix
.
A manifest might look like this: s3://bucketname/example.manifest
The manifest is an S3 object which is a JSON file with the following format:
[
{\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\"relative/path/to/custdata-1\",
\"relative/path/custdata-2\",
...
]
The preceding JSON matches the following s3Uris
:
s3://customer_bucket/some/prefix/relative/path/to/custdata-1
s3://customer_bucket/some/prefix/relative/path/custdata-2
...
The complete set of s3uris
in this manifest is the input data for the channel for this datasource. The object that each s3uris
points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.
Depending on the value specified for the S3DataType
, identifies either a key name prefix or a manifest. For example:
A key name prefix might look like this: s3://bucketname/exampleprefix
.
A manifest might look like this: s3://bucketname/example.manifest
The manifest is an S3 object which is a JSON file with the following format:
The preceding JSON matches the following s3Uris
:
[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\"relative/path/to/custdata-1\",
\"relative/path/custdata-2\",
...
\"relative/path/custdata-N\"
]
The preceding JSON matches the following s3Uris
:
s3://customer_bucket/some/prefix/relative/path/to/custdata-1
s3://customer_bucket/some/prefix/relative/path/custdata-2
...
s3://customer_bucket/some/prefix/relative/path/custdata-N
The complete set of s3uris
in this manifest is the input data for the channel for this datasource. The object that each s3uris
points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.
The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix
.
For every S3 object used as input for the transform job, batch transform stores the transformed data with an .out
suffix in a corresponding subfolder in the location in the output prefix. For example, for the input data stored at s3://bucket-name/input-name-prefix/dataset01/data.csv
, batch transform stores the transformed data at s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out
. Batch transform doesn't upload partially processed objects. For an input S3 object that contains multiple records, it creates an .out
file only if the transform job succeeds on the entire file. When the input contains multiple S3 objects, the batch transform job processes the listed S3 objects and uploads only the output for successfully processed objects. If any object fails in the transform job, batch transform marks the job as failed to prompt investigation.
Depending on the value specified for the S3DataType
, identifies either a key name prefix or a manifest. For example:
A key name prefix might look like this: s3://bucketname/exampleprefix
.
A manifest might look like this: s3://bucketname/example.manifest
The manifest is an S3 object which is a JSON file with the following format:
[
{\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\"relative/path/to/custdata-1\",
\"relative/path/custdata-2\",
...
]
The preceding JSON matches the following S3Uris
:
s3://customer_bucket/some/prefix/relative/path/to/custdata-1
s3://customer_bucket/some/prefix/relative/path/custdata-2
...
The complete set of S3Uris
in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris
points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.
Depending on the value specified for the S3DataType
, identifies either a key name prefix or a manifest. For example:
A key name prefix might look like this: s3://bucketname/exampleprefix
.
A manifest might look like this: s3://bucketname/example.manifest
The manifest is an S3 object which is a JSON file with the following format:
[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\"relative/path/to/custdata-1\",
\"relative/path/custdata-2\",
...
\"relative/path/custdata-N\"
]
The preceding JSON matches the following S3Uris
:
s3://customer_bucket/some/prefix/relative/path/to/custdata-1
s3://customer_bucket/some/prefix/relative/path/custdata-2
...
s3://customer_bucket/some/prefix/relative/path/custdata-N
The complete set of S3Uris
in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris
points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.
The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.
" } }, "SearchExpression": { - "base": "A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression
can contain up to twenty elements.
A SearchExpression
contains the following components:
A list of Filter
objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value.
A list of NestedFilter
objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.
A list of SearchExpression
objects. A search expression object can be nested in a list of search expression objects.
A Boolean operator: And
or Or
.
A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression
can contain up to twenty elements.
A SearchExpression
contains the following components:
A list of Filter
objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value. A SearchExpression
can include only one Contains
operator.
A list of NestedFilter
objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.
A list of SearchExpression
objects. A search expression object can be nested in a list of search expression objects.
A Boolean operator: And
or Or
.
A Boolean conditional statement. Resource objects must satisfy this condition to be included in search results. You must provide at least one subexpression, filter, or nested filter. The maximum number of recursive SubExpressions
, NestedFilters
, and Filters
that can be included in a SearchExpression
object is 50.
The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType
is None
, which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line
to split records on a newline character boundary. SplitType
also supports a number of record-oriented binary data formats.
When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy
and MaxPayloadInMB
parameters. When the value of BatchStrategy
is MultiRecord
, Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB
limit. If the value of BatchStrategy
is SingleRecord
, Amazon SageMaker sends individual records in each request.
Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy
is set to SingleRecord
. Padding is not removed if the value of BatchStrategy
is set to MultiRecord
.
For more information about the RecordIO, see Data Format in the MXNet documentation. For more information about the TFRecord, see Consuming TFRecord data in the TensorFlow documentation.
The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType
is None
, which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line
to split records on a newline character boundary. SplitType
also supports a number of record-oriented binary data formats.
When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy
and MaxPayloadInMB
parameters. When the value of BatchStrategy
is MultiRecord
, Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB
limit. If the value of BatchStrategy
is SingleRecord
, Amazon SageMaker sends individual records in each request.
Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy
is set to SingleRecord
. Padding is not removed if the value of BatchStrategy
is set to MultiRecord
.
For more information about RecordIO
, see Create a Dataset Using RecordIO in the MXNet documentation. For more information about TFRecord
, see Consuming TFRecord data in the TensorFlow documentation.
The size of the ML storage volume that you want to provision.
ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File
as the TrainingInputMode
in the algorithm specification.
You must specify sufficient ML storage for your scenario.
Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.
The size of the ML storage volume that you want to provision.
ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File
as the TrainingInputMode
in the algorithm specification.
You must specify sufficient ML storage for your scenario.
Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.
Certain Nitro-based instances include local storage with a fixed total size, dependent on the instance type. When using these instances for training, Amazon SageMaker mounts the local instance storage instead of Amazon EBS gp2 storage. You can't request a VolumeSizeInGB
greater than the total size of the local instance storage.
For a list of instance types that support local instance storage, including the total size per instance type, see Instance Store Volumes.
Welcome to the Amazon SES API v2 Reference. This guide provides information about the Amazon SES API v2, including supported operations, data types, parameters, and schemas.
Amazon SES is an AWS service that you can use to send email messages to your customers.
If you're new to Amazon SES API v2, you might find it helpful to also review the Amazon Simple Email Service Developer Guide. The Amazon SES Developer Guide provides information and code samples that demonstrate how to use Amazon SES API v2 features programmatically.
The Amazon SES API v2 is available in several AWS Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see AWS Service Endpoints in the Amazon Web Services General Reference. To learn more about AWS Regions, see Managing AWS Regions in the Amazon Web Services General Reference.
In each Region, AWS maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see AWS Global Infrastructure.
", + "operations": { + "CreateConfigurationSet": "Create a configuration set. Configuration sets are groups of rules that you can apply to the emails that you send. You apply a configuration set to an email by specifying the name of the configuration set when you call the Amazon SES API v2. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
", + "CreateConfigurationSetEventDestination": "Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
A single configuration set can include more than one event destination.
", + "CreateDedicatedIpPool": "Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your AWS account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, the message is sent from one of the addresses in the associated pool.
", + "CreateDeliverabilityTestReport": "Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon SES API v2 then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport
operation to view the results of the test.
Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.
When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.
When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. For some DNS providers, it can take 72 hours or more to complete the domain verification process.
", + "DeleteConfigurationSet": "Delete an existing configuration set.
Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
", + "DeleteConfigurationSetEventDestination": "Delete an event destination.
Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
", + "DeleteDedicatedIpPool": "Delete a dedicated IP pool.
", + "DeleteEmailIdentity": "Deletes an email identity. An identity can be either an email address or a domain name.
", + "GetAccount": "Obtain information about the email-sending status and capabilities of your Amazon SES account in the current AWS Region.
", + "GetBlacklistReports": "Retrieve a list of the blacklists that your dedicated IP addresses appear on.
", + "GetConfigurationSet": "Get information about an existing configuration set, including the dedicated IP pool that it's associated with, whether or not it's enabled for sending email, and more.
Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
", + "GetConfigurationSetEventDestinations": "Retrieve a list of event destinations that are associated with a configuration set.
Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
", + "GetDedicatedIp": "Get information about a dedicated IP address, including the name of the dedicated IP pool that it's associated with, as well information about the automatic warm-up process for the address.
", + "GetDedicatedIps": "List the dedicated IP addresses that are associated with your AWS account.
", + "GetDeliverabilityDashboardOptions": "Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.
", + "GetDeliverabilityTestReport": "Retrieve the results of a predictive inbox placement test.
", + "GetDomainDeliverabilityCampaign": "Retrieve all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for.
", + "GetDomainStatisticsReport": "Retrieve inbox placement and engagement rates for the domains that you use to send email.
", + "GetEmailIdentity": "Provides information about a specific identity, including the identity's verification status, its DKIM authentication status, and its custom Mail-From settings.
", + "ListConfigurationSets": "List all of the configuration sets associated with your account in the current region.
Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
", + "ListDedicatedIpPools": "List all of the dedicated IP pools that exist in your AWS account in the current Region.
", + "ListDeliverabilityTestReports": "Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete, you can use the GetDeliverabilityTestReport
operation to view the results.
Retrieve deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard for the domain.
", + "ListEmailIdentities": "Returns a list of all of the email identities that are associated with your AWS account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't. This operation returns identities that are associated with Amazon SES and Amazon Pinpoint.
", + "ListTagsForResource": "Retrieve a list of the tags (keys and values) that are associated with a specified resource. A tag is a label that you optionally define and associate with a resource. Each tag consists of a required tag key and an optional associated tag value. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
", + "PutAccountDedicatedIpWarmupAttributes": "Enable or disable the automatic warm-up feature for dedicated IP addresses.
", + "PutAccountSendingAttributes": "Enable or disable the ability of your account to send email.
", + "PutConfigurationSetDeliveryOptions": "Associate a configuration set with a dedicated IP pool. You can use dedicated IP pools to create groups of dedicated IP addresses for sending specific types of email.
", + "PutConfigurationSetReputationOptions": "Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.
", + "PutConfigurationSetSendingOptions": "Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.
", + "PutConfigurationSetTrackingOptions": "Specify a custom domain to use for open and click tracking elements in email that you send.
", + "PutDedicatedIpInPool": "Move a dedicated IP address to an existing dedicated IP pool.
The dedicated IP address that you specify must already exist, and must be associated with your AWS account.
The dedicated IP pool you specify must already exist. You can create a new pool by using the CreateDedicatedIpPool
operation.
Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.
", + "PutEmailIdentityDkimAttributes": "Used to enable or disable DKIM authentication for an email identity.
", + "PutEmailIdentityFeedbackAttributes": "Used to enable or disable feedback forwarding for an identity. This setting determines what happens when an identity is used to send an email that results in a bounce or complaint event.
If the value is true
, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path
header of the original email.
You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).
", + "PutEmailIdentityMailFromAttributes": "Used to enable or disable the custom Mail-From domain configuration for an email identity.
", + "SendEmail": "Sends an email message. You can use the Amazon SES API v2 to send two types of messages:
Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and the Amazon SES API v2 assembles the message for you.
Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.
Add one or more tags (keys and values) to a specified resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
", + "UntagResource": "Remove one or more tags (keys and values) from a specified resource.
", + "UpdateConfigurationSetEventDestination": "Update the configuration of an event destination for a configuration set.
Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
" + }, + "shapes": { + "AccountSuspendedException": { + "base": "The message can't be sent because the account's ability to send email has been permanently restricted.
", + "refs": { + } + }, + "AlreadyExistsException": { + "base": "The resource specified in your request already exists.
", + "refs": { + } + }, + "AmazonResourceName": { + "base": null, + "refs": { + "KinesisFirehoseDestination$IamRoleArn": "The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 uses to send email events to the Amazon Kinesis Data Firehose stream.
", + "KinesisFirehoseDestination$DeliveryStreamArn": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that the Amazon SES API v2 sends email events to.
", + "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource that you want to retrieve tag information for.
", + "PinpointDestination$ApplicationArn": "The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.
", + "SnsDestination$TopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.
", + "TagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource that you want to add one or more tags to.
", + "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource that you want to remove one or more tags from.
" + } + }, + "BadRequestException": { + "base": "The input you provided is invalid.
", + "refs": { + } + }, + "BehaviorOnMxFailure": { + "base": "The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue
, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage
, the Amazon SES API v2 returns a MailFromDomainNotVerified
error, and doesn't attempt to deliver the email.
These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending
, Failed
, and TemporaryFailure
states.
The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue
, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage
, the Amazon SES API v2 returns a MailFromDomainNotVerified
error, and doesn't attempt to deliver the email.
These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending
, Failed
, and TemporaryFailure
states.
The action that you want to take if the required MX record isn't found when you send an email. When you set this value to UseDefaultValue
, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage
, the Amazon SES API v2 returns a MailFromDomainNotVerified
error, and doesn't attempt to deliver the email.
These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending
, Failed
, and TemporaryFailure
states.
An object that contains information about a blacklisting event that impacts one of the dedicated IP addresses that is associated with your account.
", + "refs": { + "BlacklistEntries$member": null + } + }, + "BlacklistItemName": { + "base": "An IP address that you want to obtain blacklist information for.
", + "refs": { + "BlacklistItemNames$member": null, + "BlacklistReport$key": null + } + }, + "BlacklistItemNames": { + "base": null, + "refs": { + "GetBlacklistReportsRequest$BlacklistItemNames": "A list of IP addresses that you want to retrieve blacklist information about. You can only specify the dedicated IP addresses that you use to send email using Amazon SES or Amazon Pinpoint.
" + } + }, + "BlacklistReport": { + "base": null, + "refs": { + "GetBlacklistReportsResponse$BlacklistReport": "An object that contains information about a blacklist that one of your dedicated IP addresses appears on.
" + } + }, + "BlacklistingDescription": { + "base": "A description of the blacklisting event.
", + "refs": { + "BlacklistEntry$Description": "Additional information about the blacklisting event, as provided by the blacklist maintainer.
" + } + }, + "Body": { + "base": "Represents the body of the email message.
", + "refs": { + "Message$Body": "The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.
" + } + }, + "CampaignId": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$CampaignId": "The unique identifier for the campaign. The Deliverability dashboard automatically generates and assigns this identifier to a campaign.
", + "GetDomainDeliverabilityCampaignRequest$CampaignId": "The unique identifier for the campaign. The Deliverability dashboard automatically generates and assigns this identifier to a campaign.
" + } + }, + "Charset": { + "base": null, + "refs": { + "Content$Charset": "The character set for the content. Because of the constraints of the SMTP protocol, the Amazon SES API v2 uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8
, ISO-8859-1
, or Shift_JIS
.
An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.
", + "refs": { + "EventDestination$CloudWatchDestination": "An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.
", + "EventDestinationDefinition$CloudWatchDestination": "An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.
" + } + }, + "CloudWatchDimensionConfiguration": { + "base": "An object that defines the dimension configuration to use when you send email events to Amazon CloudWatch.
", + "refs": { + "CloudWatchDimensionConfigurations$member": null + } + }, + "CloudWatchDimensionConfigurations": { + "base": null, + "refs": { + "CloudWatchDestination$DimensionConfigurations": "An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.
" + } + }, + "ConcurrentModificationException": { + "base": "The resource is being modified by another operation or thread.
", + "refs": { + } + }, + "ConfigurationSetName": { + "base": "The name of a configuration set.
Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
", + "refs": { + "ConfigurationSetNameList$member": null, + "CreateConfigurationSetEventDestinationRequest$ConfigurationSetName": "The name of the configuration set that you want to add an event destination to.
", + "CreateConfigurationSetRequest$ConfigurationSetName": "The name of the configuration set.
", + "DeleteConfigurationSetEventDestinationRequest$ConfigurationSetName": "The name of the configuration set that contains the event destination that you want to delete.
", + "DeleteConfigurationSetRequest$ConfigurationSetName": "The name of the configuration set that you want to delete.
", + "GetConfigurationSetEventDestinationsRequest$ConfigurationSetName": "The name of the configuration set that contains the event destination.
", + "GetConfigurationSetRequest$ConfigurationSetName": "The name of the configuration set that you want to obtain more information about.
", + "GetConfigurationSetResponse$ConfigurationSetName": "The name of the configuration set.
", + "PutConfigurationSetDeliveryOptionsRequest$ConfigurationSetName": "The name of the configuration set that you want to associate with a dedicated IP pool.
", + "PutConfigurationSetReputationOptionsRequest$ConfigurationSetName": "The name of the configuration set that you want to enable or disable reputation metric tracking for.
", + "PutConfigurationSetSendingOptionsRequest$ConfigurationSetName": "The name of the configuration set that you want to enable or disable email sending for.
", + "PutConfigurationSetTrackingOptionsRequest$ConfigurationSetName": "The name of the configuration set that you want to add a custom tracking domain to.
", + "SendEmailRequest$ConfigurationSetName": "The name of the configuration set that you want to use when sending the email.
", + "UpdateConfigurationSetEventDestinationRequest$ConfigurationSetName": "The name of the configuration set that contains the event destination that you want to modify.
" + } + }, + "ConfigurationSetNameList": { + "base": null, + "refs": { + "ListConfigurationSetsResponse$ConfigurationSets": "An array that contains all of the configuration sets in your Amazon SES account in the current AWS Region.
" + } + }, + "Content": { + "base": "An object that represents the content of the email, and optionally a character set specification.
", + "refs": { + "Body$Text": "An object that represents the version of the message that is displayed in email clients that don't support HTML, or clients where the recipient has disabled HTML rendering.
", + "Body$Html": "An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.
", + "Message$Subject": "The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in RFC 2047.
" + } + }, + "CreateConfigurationSetEventDestinationRequest": { + "base": "A request to add an event destination to a configuration set.
", + "refs": { + } + }, + "CreateConfigurationSetEventDestinationResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "CreateConfigurationSetRequest": { + "base": "A request to create a configuration set.
", + "refs": { + } + }, + "CreateConfigurationSetResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "CreateDedicatedIpPoolRequest": { + "base": "A request to create a new dedicated IP pool.
", + "refs": { + } + }, + "CreateDedicatedIpPoolResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "CreateDeliverabilityTestReportRequest": { + "base": "A request to perform a predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. We send that message to special email addresses spread across several major email providers around the world. The test takes about 24 hours to complete. When the test is complete, you can use the GetDeliverabilityTestReport
operation to view the results of the test.
Information about the predictive inbox placement test that you created.
", + "refs": { + } + }, + "CreateEmailIdentityRequest": { + "base": "A request to begin the verification process for an email identity (an email address or domain).
", + "refs": { + } + }, + "CreateEmailIdentityResponse": { + "base": "If the email identity is a domain, this object contains tokens that you can use to create a set of CNAME records. To sucessfully verify your domain, you have to add these records to the DNS configuration for your domain.
If the email identity is an email address, this object is empty.
", + "refs": { + } + }, + "CustomRedirectDomain": { + "base": "The domain that you want to use for tracking open and click events.
", + "refs": { + "PutConfigurationSetTrackingOptionsRequest$CustomRedirectDomain": "The domain that you want to use to track open and click events.
", + "TrackingOptions$CustomRedirectDomain": "The domain that you want to use for tracking open and click events.
" + } + }, + "DailyVolume": { + "base": "An object that contains information about the volume of email sent on each day of the analysis period.
", + "refs": { + "DailyVolumes$member": null + } + }, + "DailyVolumes": { + "base": null, + "refs": { + "GetDomainStatisticsReportResponse$DailyVolumes": "An object that contains deliverability metrics for the domain that you specified. This object contains data for each day, starting on the StartDate
and ending on the EndDate
.
Contains information about a dedicated IP address that is associated with your Amazon SES API v2 account.
To learn more about requesting dedicated IP addresses, see Requesting and Relinquishing Dedicated IP Addresses in the Amazon SES Developer Guide.
", + "refs": { + "DedicatedIpList$member": null, + "GetDedicatedIpResponse$DedicatedIp": "An object that contains information about a dedicated IP address.
" + } + }, + "DedicatedIpList": { + "base": "A list of dedicated IP addresses that are associated with your AWS account.
", + "refs": { + "GetDedicatedIpsResponse$DedicatedIps": "A list of dedicated IP addresses that are associated with your AWS account.
" + } + }, + "DefaultDimensionValue": { + "base": "The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
A request to delete an event destination from a configuration set.
", + "refs": { + } + }, + "DeleteConfigurationSetEventDestinationResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "DeleteConfigurationSetRequest": { + "base": "A request to delete a configuration set.
", + "refs": { + } + }, + "DeleteConfigurationSetResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "DeleteDedicatedIpPoolRequest": { + "base": "A request to delete a dedicated IP pool.
", + "refs": { + } + }, + "DeleteDedicatedIpPoolResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "DeleteEmailIdentityRequest": { + "base": "A request to delete an existing email identity. When you delete an identity, you lose the ability to send email from that identity. You can restore your ability to send email by completing the verification process for the identity again.
", + "refs": { + } + }, + "DeleteEmailIdentityResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "DeliverabilityDashboardAccountStatus": { + "base": "The current status of your Deliverability dashboard subscription. If this value is PENDING_EXPIRATION
, your subscription is scheduled to expire at the end of the current calendar month.
The current status of your Deliverability dashboard subscription. If this value is PENDING_EXPIRATION
, your subscription is scheduled to expire at the end of the current calendar month.
An object that contains metadata related to a predictive inbox placement test.
", + "refs": { + "DeliverabilityTestReports$member": null, + "GetDeliverabilityTestReportResponse$DeliverabilityTestReport": "An object that contains the results of the predictive inbox placement test.
" + } + }, + "DeliverabilityTestReports": { + "base": null, + "refs": { + "ListDeliverabilityTestReportsResponse$DeliverabilityTestReports": "An object that contains a lists of predictive inbox placement tests that you've performed.
" + } + }, + "DeliverabilityTestStatus": { + "base": "The status of a predictive inbox placement test. If the status is IN_PROGRESS
, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE
, then the test is finished, and you can use the GetDeliverabilityTestReport
operation to view the results of the test.
The status of the predictive inbox placement test. If the status is IN_PROGRESS
, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE
, then the test is finished, and you can use the GetDeliverabilityTestReport
to view the results of the test.
The status of the predictive inbox placement test. If the status is IN_PROGRESS
, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE
, then the test is finished, and you can use the GetDeliverabilityTestReport
to view the results of the test.
The subject line for an email that you submitted in a predictive inbox placement test.
", + "refs": { + "DeliverabilityTestReport$Subject": "The subject line for an email that you submitted in a predictive inbox placement test.
" + } + }, + "DeliveryOptions": { + "base": "Used to associate a configuration set with a dedicated IP pool.
", + "refs": { + "CreateConfigurationSetRequest$DeliveryOptions": "An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.
", + "GetConfigurationSetResponse$DeliveryOptions": "An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.
" + } + }, + "Destination": { + "base": "An object that describes the recipients for an email.
", + "refs": { + "SendEmailRequest$Destination": "An object that contains the recipients of the email message.
" + } + }, + "DimensionName": { + "base": "The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:
It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS
header or a parameter to the SendEmail
or SendRawEmail
API, choose messageTag
. If you want to use your own email headers, choose emailHeader
. If you want to use link tags, choose linkTags
.
The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS
header or a parameter to the SendEmail
or SendRawEmail
API, choose messageTag
. If you want to use your own email headers, choose emailHeader
. If you want to use link tags, choose linkTags
.
An object that contains information about the DKIM configuration for an email identity.
", + "refs": { + "CreateEmailIdentityResponse$DkimAttributes": "An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.
", + "GetEmailIdentityResponse$DkimAttributes": "An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.
" + } + }, + "DkimStatus": { + "base": "The DKIM authentication status of the identity. The status can be one of the following:
PENDING
– The DKIM verification process was initiated, and Amazon SES hasn't yet detected the CNAME records in the DNS configuration for the domain.
SUCCESS
– The DKIM authentication process completed successfully.
FAILED
– The DKIM authentication process failed. This can happen when Amazon SES fails to find the required CNAME records in the DNS configuration of the domain.
TEMPORARY_FAILURE
– A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.
NOT_STARTED
– The DKIM verification process hasn't been initiated for the domain.
Describes whether or not Amazon SES has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:
PENDING
– Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them.
SUCCESS
– Amazon SES located the DKIM records in the DNS configuration for the domain and determined that they're correct. You can now send DKIM-signed email from the identity.
FAILED
– Amazon SES wasn't able to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them.
TEMPORARY_FAILURE
– A temporary issue occurred, which prevented Amazon SES from determining the DKIM status for the domain.
NOT_STARTED
– Amazon SES hasn't yet started searching for the DKIM records in the DNS configuration for the domain.
A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon SES usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.
" + } + }, + "Domain": { + "base": null, + "refs": { + "DomainDeliverabilityTrackingOption$Domain": "A verified domain that’s associated with your AWS account and currently has an active Deliverability dashboard subscription.
", + "ListDomainDeliverabilityCampaignsRequest$SubscribedDomain": "The domain to obtain deliverability data for.
" + } + }, + "DomainDeliverabilityCampaign": { + "base": "An object that contains the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption
operation).
An object that contains the deliverability data for the campaign.
" + } + }, + "DomainDeliverabilityCampaignList": { + "base": "", + "refs": { + "ListDomainDeliverabilityCampaignsResponse$DomainDeliverabilityCampaigns": "An array of responses, one for each campaign that used the domain to send email during the specified time range.
" + } + }, + "DomainDeliverabilityTrackingOption": { + "base": "An object that contains information about the Deliverability dashboard subscription for a verified domain that you use to send email and currently has an active Deliverability dashboard subscription. If a Deliverability dashboard subscription is active for a domain, you gain access to reputation, inbox placement, and other metrics for the domain.
", + "refs": { + "DomainDeliverabilityTrackingOptions$member": null + } + }, + "DomainDeliverabilityTrackingOptions": { + "base": "An object that contains information about the Deliverability dashboard subscription for a verified domain that you use to send email and currently has an active Deliverability dashboard subscription. If a Deliverability dashboard subscription is active for a domain, you gain access to reputation, inbox placement, and other metrics for the domain.
", + "refs": { + "GetDeliverabilityDashboardOptionsResponse$ActiveSubscribedDomains": "An array of objects, one for each verified domain that you use to send email and currently has an active Deliverability dashboard subscription that isn’t scheduled to expire at the end of the current calendar month.
", + "GetDeliverabilityDashboardOptionsResponse$PendingExpirationSubscribedDomains": "An array of objects, one for each verified domain that you use to send email and currently has an active Deliverability dashboard subscription that's scheduled to expire at the end of the current calendar month.
", + "PutDeliverabilityDashboardOptionRequest$SubscribedDomains": "An array of objects, one for each verified domain that you use to send email and enabled the Deliverability dashboard for.
" + } + }, + "DomainIspPlacement": { + "base": "An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.
", + "refs": { + "DomainIspPlacements$member": null + } + }, + "DomainIspPlacements": { + "base": null, + "refs": { + "DailyVolume$DomainIspPlacements": "An object that contains inbox placement metrics for a specified day in the analysis period, broken out by the recipient's email provider.
", + "OverallVolume$DomainIspPlacements": "An object that contains inbox and junk mail placement metrics for individual email providers.
" + } + }, + "EmailAddress": { + "base": null, + "refs": { + "CreateDeliverabilityTestReportRequest$FromEmailAddress": "The email address that the predictive inbox placement test email was sent from.
", + "DeliverabilityTestReport$FromEmailAddress": "The sender address that you specified for the predictive inbox placement test.
", + "EmailAddressList$member": null, + "SendEmailRequest$FromEmailAddress": "The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.
", + "SendEmailRequest$FeedbackForwardingEmailAddress": "The address that you want bounce and complaint notifications to be sent to.
" + } + }, + "EmailAddressList": { + "base": null, + "refs": { + "Destination$ToAddresses": "An array that contains the email addresses of the \"To\" recipients for the email.
", + "Destination$CcAddresses": "An array that contains the email addresses of the \"CC\" (carbon copy) recipients for the email.
", + "Destination$BccAddresses": "An array that contains the email addresses of the \"BCC\" (blind carbon copy) recipients for the email.
", + "SendEmailRequest$ReplyToAddresses": "The \"Reply-to\" email addresses for the message. When the recipient replies to the message, each Reply-to address receives the reply.
" + } + }, + "EmailContent": { + "base": "An object that defines the entire content of the email, including the message headers and the body content. You can create a simple email message, in which you specify the subject and the text and HTML versions of the message body. You can also create raw messages, in which you specify a complete MIME-formatted message. Raw messages can include attachments and custom headers.
", + "refs": { + "CreateDeliverabilityTestReportRequest$Content": "The HTML body of the message that you sent when you performed the predictive inbox placement test.
", + "SendEmailRequest$Content": "An object that contains the body of the message. You can send either a Simple message or a Raw message.
" + } + }, + "Enabled": { + "base": null, + "refs": { + "CreateEmailIdentityResponse$VerifiedForSendingStatus": "Specifies whether or not the identity is verified. You can only send email from verified email addresses or domains. For more information about verifying identities, see the Amazon Pinpoint User Guide.
", + "DkimAttributes$SigningEnabled": "If the value is true
, then the messages that you send from the identity are signed using DKIM. If the value is false
, then the messages that you send from the identity aren't DKIM-signed.
If true
, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition
.
If false
, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.
If true
, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition
.
If false
, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.
Indicates whether or not email sending is enabled for your Amazon SES account in the current AWS Region.
", + "GetAccountResponse$DedicatedIpAutoWarmupEnabled": "Indicates whether or not the automatic warm-up feature is enabled for dedicated IP addresses that are associated with your account.
", + "GetAccountResponse$ProductionAccessEnabled": "Indicates whether or not your account has production access in the current AWS Region.
If the value is false
, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.
If the value is true
, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.
Specifies whether the Deliverability dashboard is enabled. If this value is true
, the dashboard is enabled.
The feedback forwarding configuration for the identity.
If the value is true
, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path
header of the original email.
You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).
", + "GetEmailIdentityResponse$VerifiedForSendingStatus": "Specifies whether or not the identity is verified. You can only send email from verified email addresses or domains. For more information about verifying identities, see the Amazon Pinpoint User Guide.
", + "IdentityInfo$SendingEnabled": "Indicates whether or not you can send email from the identity.
An identity is an email address or domain that you send email from. Before you can send email from an identity, you have to demonstrate that you own the identity, and that you authorize Amazon SES to send email from that identity.
", + "InboxPlacementTrackingOption$Global": "Specifies whether inbox placement data is being tracked for the domain.
", + "PutAccountDedicatedIpWarmupAttributesRequest$AutoWarmupEnabled": "Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon SES account in the current AWS Region. Set to true
to enable the automatic warm-up feature, or set to false
to disable it.
Enables or disables your account's ability to send email. Set to true
to enable email sending, or set to false
to disable email sending.
If AWS paused your account's ability to send email, you can't use this operation to resume your account's ability to send email.
If true
, tracking of reputation metrics is enabled for the configuration set. If false
, tracking of reputation metrics is disabled for the configuration set.
If true
, email sending is enabled for the configuration set. If false
, email sending is disabled for the configuration set.
Specifies whether to enable the Deliverability dashboard. To enable the dashboard, set this value to true
.
Sets the DKIM signing configuration for the identity.
When you set this value true
, then the messages that are sent from the identity are signed using DKIM. If you set this value to false
, your messages are sent without DKIM signing.
Sets the feedback forwarding configuration for the identity.
If the value is true
, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path
header of the original email.
You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).
", + "ReputationOptions$ReputationMetricsEnabled": "If true
, tracking of reputation metrics is enabled for the configuration set. If false
, tracking of reputation metrics is disabled for the configuration set.
If true
, email sending is enabled for the configuration set. If false
, email sending is disabled for the configuration set.
The major email providers who handled the email message.
" + } + }, + "EventDestination": { + "base": "In the Amazon SES API v2, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
", + "refs": { + "EventDestinations$member": null + } + }, + "EventDestinationDefinition": { + "base": "An object that defines the event destination. Specifically, it defines which services receive events from emails sent using the configuration set that the event destination is associated with. Also defines the types of events that are sent to the event destination.
", + "refs": { + "CreateConfigurationSetEventDestinationRequest$EventDestination": "An object that defines the event destination.
", + "UpdateConfigurationSetEventDestinationRequest$EventDestination": "An object that defines the event destination.
" + } + }, + "EventDestinationName": { + "base": "The name of an event destination.
Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
", + "refs": { + "CreateConfigurationSetEventDestinationRequest$EventDestinationName": "A name that identifies the event destination within the configuration set.
", + "DeleteConfigurationSetEventDestinationRequest$EventDestinationName": "The name of the event destination that you want to delete.
", + "EventDestination$Name": "A name that identifies the event destination.
", + "UpdateConfigurationSetEventDestinationRequest$EventDestinationName": "The name of the event destination that you want to modify.
" + } + }, + "EventDestinations": { + "base": null, + "refs": { + "GetConfigurationSetEventDestinationsResponse$EventDestinations": "An array that includes all of the events destinations that have been configured for the configuration set.
" + } + }, + "EventType": { + "base": "An email sending event type. For example, email sends, opens, and bounces are all email events.
", + "refs": { + "EventTypes$member": null + } + }, + "EventTypes": { + "base": null, + "refs": { + "EventDestination$MatchingEventTypes": "The types of events that Amazon SES sends to the specified event destinations.
", + "EventDestinationDefinition$MatchingEventTypes": "An array that specifies which events the Amazon SES API v2 should send to the destinations in this EventDestinationDefinition
.
The reputation status of your Amazon SES account. The status can be one of the following:
HEALTHY
– There are no reputation-related issues that currently impact your account.
PROBATION
– We've identified potential issues with your Amazon SES account. We're placing your account under review while you work on correcting these issues.
SHUTDOWN
– Your account's ability to send email is currently paused because of an issue with the email sent from your account. When you correct the issue, you can contact us and request that your account's ability to send email is resumed.
A request to obtain information about the email-sending capabilities of your Amazon SES account.
", + "refs": { + } + }, + "GetAccountResponse": { + "base": "A list of details about the email-sending capabilities of your Amazon SES account in the current AWS Region.
", + "refs": { + } + }, + "GetBlacklistReportsRequest": { + "base": "A request to retrieve a list of the blacklists that your dedicated IP addresses appear on.
", + "refs": { + } + }, + "GetBlacklistReportsResponse": { + "base": "An object that contains information about blacklist events.
", + "refs": { + } + }, + "GetConfigurationSetEventDestinationsRequest": { + "base": "A request to obtain information about the event destinations for a configuration set.
", + "refs": { + } + }, + "GetConfigurationSetEventDestinationsResponse": { + "base": "Information about an event destination for a configuration set.
", + "refs": { + } + }, + "GetConfigurationSetRequest": { + "base": "A request to obtain information about a configuration set.
", + "refs": { + } + }, + "GetConfigurationSetResponse": { + "base": "Information about a configuration set.
", + "refs": { + } + }, + "GetDedicatedIpRequest": { + "base": "A request to obtain more information about a dedicated IP address.
", + "refs": { + } + }, + "GetDedicatedIpResponse": { + "base": "Information about a dedicated IP address.
", + "refs": { + } + }, + "GetDedicatedIpsRequest": { + "base": "A request to obtain more information about dedicated IP pools.
", + "refs": { + } + }, + "GetDedicatedIpsResponse": { + "base": "Information about the dedicated IP addresses that are associated with your AWS account.
", + "refs": { + } + }, + "GetDeliverabilityDashboardOptionsRequest": { + "base": "Retrieve information about the status of the Deliverability dashboard for your AWS account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for your domains. You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.
", + "refs": { + } + }, + "GetDeliverabilityDashboardOptionsResponse": { + "base": "An object that shows the status of the Deliverability dashboard.
", + "refs": { + } + }, + "GetDeliverabilityTestReportRequest": { + "base": "A request to retrieve the results of a predictive inbox placement test.
", + "refs": { + } + }, + "GetDeliverabilityTestReportResponse": { + "base": "The results of the predictive inbox placement test.
", + "refs": { + } + }, + "GetDomainDeliverabilityCampaignRequest": { + "base": "Retrieve all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption
operation).
An object that contains all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for.
", + "refs": { + } + }, + "GetDomainStatisticsReportRequest": { + "base": "A request to obtain deliverability metrics for a domain.
", + "refs": { + } + }, + "GetDomainStatisticsReportResponse": { + "base": "An object that includes statistics that are related to the domain that you specified.
", + "refs": { + } + }, + "GetEmailIdentityRequest": { + "base": "A request to return details about an email identity.
", + "refs": { + } + }, + "GetEmailIdentityResponse": { + "base": "Details about an email identity.
", + "refs": { + } + }, + "Identity": { + "base": null, + "refs": { + "CreateEmailIdentityRequest$EmailIdentity": "The email address or domain that you want to verify.
", + "DeleteEmailIdentityRequest$EmailIdentity": "The identity (that is, the email address or domain) that you want to delete.
", + "DomainDeliverabilityCampaign$FromAddress": "The verified email address that the email message was sent from.
", + "GetDomainStatisticsReportRequest$Domain": "The domain that you want to obtain deliverability metrics for.
", + "GetEmailIdentityRequest$EmailIdentity": "The email identity that you want to retrieve details for.
", + "IdentityInfo$IdentityName": "The address or domain of the identity.
", + "PutEmailIdentityDkimAttributesRequest$EmailIdentity": "The email identity that you want to change the DKIM settings for.
", + "PutEmailIdentityFeedbackAttributesRequest$EmailIdentity": "The email identity that you want to configure bounce and complaint feedback forwarding for.
", + "PutEmailIdentityMailFromAttributesRequest$EmailIdentity": "The verified email identity that you want to set up the custom MAIL FROM domain for.
" + } + }, + "IdentityInfo": { + "base": "Information about an email identity.
", + "refs": { + "IdentityInfoList$member": null + } + }, + "IdentityInfoList": { + "base": null, + "refs": { + "ListEmailIdentitiesResponse$EmailIdentities": "An array that includes all of the email identities associated with your AWS account.
" + } + }, + "IdentityType": { + "base": "The email identity type. The identity type can be one of the following:
EMAIL_ADDRESS
– The identity is an email address.
DOMAIN
– The identity is a domain.
The email identity type.
", + "GetEmailIdentityResponse$IdentityType": "The email identity type.
", + "IdentityInfo$IdentityType": "The email identity type. The identity type can be one of the following:
EMAIL_ADDRESS
– The identity is an email address.
DOMAIN
– The identity is a domain.
MANAGED_DOMAIN
– The identity is a domain that is managed by AWS.
The URL of an image that contains a snapshot of the email message that was sent.
" + } + }, + "InboxPlacementTrackingOption": { + "base": "An object that contains information about the inbox placement data settings for a verified domain that’s associated with your AWS account. This data is available only if you enabled the Deliverability dashboard for the domain.
", + "refs": { + "DomainDeliverabilityTrackingOption$InboxPlacementTrackingOption": "An object that contains information about the inbox placement data settings for the domain.
" + } + }, + "Ip": { + "base": "An IPv4 address.
", + "refs": { + "DedicatedIp$Ip": "An IPv4 address.
", + "GetDedicatedIpRequest$Ip": "The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that's assocaited with your AWS account.
", + "IpList$member": null, + "PutDedicatedIpInPoolRequest$Ip": "The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that's associated with your AWS account.
", + "PutDedicatedIpWarmupAttributesRequest$Ip": "The dedicated IP address that you want to update the warm-up attributes for.
" + } + }, + "IpList": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$SendingIps": "The IP addresses that were used to send the email message.
" + } + }, + "IspName": { + "base": "The name of an email provider.
", + "refs": { + "DomainIspPlacement$IspName": "The name of the email provider that the inbox placement data applies to.
", + "IspNameList$member": null, + "IspPlacement$IspName": "The name of the email provider that the inbox placement data applies to.
" + } + }, + "IspNameList": { + "base": null, + "refs": { + "InboxPlacementTrackingOption$TrackedIsps": "An array of strings, one for each major email provider that the inbox placement data applies to.
" + } + }, + "IspPlacement": { + "base": "An object that describes how email sent during the predictive inbox placement test was handled by a certain email provider.
", + "refs": { + "IspPlacements$member": null + } + }, + "IspPlacements": { + "base": null, + "refs": { + "GetDeliverabilityTestReportResponse$IspPlacements": "An object that describes how the test email was handled by several email providers, including Gmail, Hotmail, Yahoo, AOL, and others.
" + } + }, + "KinesisFirehoseDestination": { + "base": "An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.
", + "refs": { + "EventDestination$KinesisFirehoseDestination": "An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.
", + "EventDestinationDefinition$KinesisFirehoseDestination": "An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.
" + } + }, + "LastFreshStart": { + "base": "The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.
", + "refs": { + "ReputationOptions$LastFreshStart": "The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.
" + } + }, + "LimitExceededException": { + "base": "There are too many instances of the specified resource type.
", + "refs": { + } + }, + "ListConfigurationSetsRequest": { + "base": "A request to obtain a list of configuration sets for your Amazon SES account in the current AWS Region.
", + "refs": { + } + }, + "ListConfigurationSetsResponse": { + "base": "A list of configuration sets in your Amazon SES account in the current AWS Region.
", + "refs": { + } + }, + "ListDedicatedIpPoolsRequest": { + "base": "A request to obtain a list of dedicated IP pools.
", + "refs": { + } + }, + "ListDedicatedIpPoolsResponse": { + "base": "A list of dedicated IP pools.
", + "refs": { + } + }, + "ListDeliverabilityTestReportsRequest": { + "base": "A request to list all of the predictive inbox placement tests that you've performed.
", + "refs": { + } + }, + "ListDeliverabilityTestReportsResponse": { + "base": "A list of the predictive inbox placement test reports that are available for your account, regardless of whether or not those tests are complete.
", + "refs": { + } + }, + "ListDomainDeliverabilityCampaignsRequest": { + "base": "Retrieve deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard.
", + "refs": { + } + }, + "ListDomainDeliverabilityCampaignsResponse": { + "base": "An array of objects that provide deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard for the domain.
", + "refs": { + } + }, + "ListEmailIdentitiesRequest": { + "base": "A request to list all of the email identities associated with your AWS account. This list includes identities that you've already verified, identities that are unverified, and identities that were verified in the past, but are no longer verified.
", + "refs": { + } + }, + "ListEmailIdentitiesResponse": { + "base": "A list of all of the identities that you've attempted to verify, regardless of whether or not those identities were successfully verified.
", + "refs": { + } + }, + "ListOfDedicatedIpPools": { + "base": "A list of dedicated IP pools that are associated with your AWS account.
", + "refs": { + "ListDedicatedIpPoolsResponse$DedicatedIpPools": "A list of all of the dedicated IP pools that are associated with your AWS account in the current Region.
" + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "MailFromAttributes": { + "base": "A list of attributes that are associated with a MAIL FROM domain.
", + "refs": { + "GetEmailIdentityResponse$MailFromAttributes": "An object that contains information about the Mail-From attributes for the email identity.
" + } + }, + "MailFromDomainName": { + "base": "The domain that you want to use as a MAIL FROM domain.
", + "refs": { + "MailFromAttributes$MailFromDomain": "The name of a domain that an email identity uses as a custom MAIL FROM domain.
", + "PutEmailIdentityMailFromAttributesRequest$MailFromDomain": "The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must meet the following criteria:
It has to be a subdomain of the verified identity.
It can't be used to receive email.
It can't be used in a \"From\" address if the MAIL FROM domain is a destination for feedback forwarding emails.
The message can't be sent because the sending domain isn't verified.
", + "refs": { + } + }, + "MailFromDomainStatus": { + "base": "The status of the MAIL FROM domain. This status can have the following values:
PENDING
– Amazon SES hasn't started searching for the MX record yet.
SUCCESS
– Amazon SES detected the required MX record for the MAIL FROM domain.
FAILED
– Amazon SES can't find the required MX record, or the record no longer exists.
TEMPORARY_FAILURE
– A temporary issue occurred, which prevented Amazon SES from determining the status of the MAIL FROM domain.
The status of the MAIL FROM domain. This status can have the following values:
PENDING
– Amazon SES hasn't started searching for the MX record yet.
SUCCESS
– Amazon SES detected the required MX record for the MAIL FROM domain.
FAILED
– Amazon SES can't find the required MX record, or the record no longer exists.
TEMPORARY_FAILURE
– A temporary issue occurred, which prevented Amazon SES from determining the status of the MAIL FROM domain.
The maximum number of emails that you can send in the current AWS Region over a 24-hour period. This value is also called your sending quota.
" + } + }, + "MaxItems": { + "base": null, + "refs": { + "GetDedicatedIpsRequest$PageSize": "The number of results to show in a single call to GetDedicatedIpsRequest
. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken
element, which you can use to obtain additional results.
The number of results to show in a single call to ListConfigurationSets
. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken
element, which you can use to obtain additional results.
The number of results to show in a single call to ListDedicatedIpPools
. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken
element, which you can use to obtain additional results.
The number of results to show in a single call to ListDeliverabilityTestReports
. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken
element, which you can use to obtain additional results.
The value you specify has to be at least 0, and can be no more than 1000.
", + "ListDomainDeliverabilityCampaignsRequest$PageSize": "The maximum number of results to include in response to a single call to the ListDomainDeliverabilityCampaigns
operation. If the number of results is larger than the number that you specify in this parameter, the response includes a NextToken
element, which you can use to obtain additional results.
The number of results to show in a single call to ListEmailIdentities
. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken
element, which you can use to obtain additional results.
The value you specify has to be at least 0, and can be no more than 1000.
" + } + }, + "MaxSendRate": { + "base": null, + "refs": { + "SendQuota$MaxSendRate": "The maximum number of emails that you can send per second in the current AWS Region. This value is also called your maximum sending rate or your maximum TPS (transactions per second) rate.
" + } + }, + "Message": { + "base": "Represents the email message that you're sending. The Message
object consists of a subject line and a message body.
The simple email message. The message consists of a subject and a message body.
" + } + }, + "MessageContent": { + "base": "The body of an email message.
", + "refs": { + "GetDeliverabilityTestReportResponse$Message": "An object that contains the message that you sent when you performed this predictive inbox placement test.
" + } + }, + "MessageData": { + "base": null, + "refs": { + "Content$Data": "The content of the message itself.
" + } + }, + "MessageRejected": { + "base": "The message can't be sent because it contains invalid content.
", + "refs": { + } + }, + "MessageTag": { + "base": "Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.
", + "refs": { + "MessageTagList$member": null + } + }, + "MessageTagList": { + "base": "A list of message tags.
", + "refs": { + "SendEmailRequest$EmailTags": "A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail
operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
The name of the message tag. The message tag name has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
The name of the message tag. The message tag name has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
The value of the message tag. The message tag value has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
The value of the message tag. The message tag value has to meet the following criteria:
It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
It can contain no more than 256 characters.
A token returned from a previous call to GetDedicatedIps
to indicate the position of the dedicated IP pool in the list of IP pools.
A token that indicates that there are additional dedicated IP addresses to list. To view additional addresses, issue another request to GetDedicatedIps
, passing this token in the NextToken
parameter.
A token returned from a previous call to ListConfigurationSets
to indicate the position in the list of configuration sets.
A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ListConfigurationSets
, and pass this token in the NextToken
parameter.
A token returned from a previous call to ListDedicatedIpPools
to indicate the position in the list of dedicated IP pools.
A token that indicates that there are additional IP pools to list. To view additional IP pools, issue another request to ListDedicatedIpPools
, passing this token in the NextToken
parameter.
A token returned from a previous call to ListDeliverabilityTestReports
to indicate the position in the list of predictive inbox placement tests.
A token that indicates that there are additional predictive inbox placement tests to list. To view additional predictive inbox placement tests, issue another request to ListDeliverabilityTestReports
, and pass this token in the NextToken
parameter.
A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns
operation. This token indicates the position of a campaign in the list of campaigns.
A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns
operation. This token indicates the position of the campaign in the list of campaigns.
A token returned from a previous call to ListEmailIdentities
to indicate the position in the list of identities.
A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ListEmailIdentities
, and pass this token in the NextToken
parameter.
The resource you attempted to access doesn't exist.
", + "refs": { + } + }, + "OutboundMessageId": { + "base": null, + "refs": { + "SendEmailResponse$MessageId": "A unique identifier for the message that is generated when the message is accepted.
It is possible for the Amazon SES API v2 to accept a message without sending it. This can happen when the message that you're trying to send has an attachment contains a virus, or when you send a templated email that contains invalid personalization content, for example.
An object that contains information about email that was sent from the selected domain.
", + "refs": { + "GetDomainStatisticsReportResponse$OverallVolume": "An object that contains deliverability metrics for the domain that you specified. The data in this object is a summary of all of the data that was collected from the StartDate
to the EndDate
.
An object that contains information about inbox placement percentages.
", + "refs": { + "DomainDeliverabilityCampaign$ReadRate": "The percentage of email messages that were opened by recipients. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.
", + "DomainDeliverabilityCampaign$DeleteRate": "The percentage of email messages that were deleted by recipients, without being opened first. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.
", + "DomainDeliverabilityCampaign$ReadDeleteRate": "The percentage of email messages that were opened and then deleted by recipients. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.
", + "DomainIspPlacement$InboxPercentage": "The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.
", + "DomainIspPlacement$SpamPercentage": "The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.
", + "OverallVolume$ReadRatePercent": "The percentage of emails that were sent from the domain that were read by their recipients.
", + "PlacementStatistics$InboxPercentage": "The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.
", + "PlacementStatistics$SpamPercentage": "The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.
", + "PlacementStatistics$MissingPercentage": "The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.
", + "PlacementStatistics$SpfPercentage": "The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.
", + "PlacementStatistics$DkimPercentage": "The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.
" + } + }, + "Percentage100Wrapper": { + "base": null, + "refs": { + "DedicatedIp$WarmupPercentage": "Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.
", + "PutDedicatedIpWarmupAttributesRequest$WarmupPercentage": "The warm-up percentage that you want to associate with the dedicated IP address.
" + } + }, + "PinpointDestination": { + "base": "An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.
", + "refs": { + "EventDestination$PinpointDestination": "An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.
", + "EventDestinationDefinition$PinpointDestination": "An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.
" + } + }, + "PlacementStatistics": { + "base": "An object that contains inbox placement data for an email provider.
", + "refs": { + "GetDeliverabilityTestReportResponse$OverallPlacement": "An object that specifies how many test messages that were sent during the predictive inbox placement test were delivered to recipients' inboxes, how many were sent to recipients' spam folders, and how many weren't delivered.
", + "IspPlacement$PlacementStatistics": "An object that contains inbox placement metrics for a specific email provider.
" + } + }, + "PoolName": { + "base": "The name of a dedicated IP pool.
", + "refs": { + "CreateDedicatedIpPoolRequest$PoolName": "The name of the dedicated IP pool.
", + "DedicatedIp$PoolName": "The name of the dedicated IP pool that the IP address is associated with.
", + "DeleteDedicatedIpPoolRequest$PoolName": "The name of the dedicated IP pool that you want to delete.
", + "DeliveryOptions$SendingPoolName": "The name of the dedicated IP pool that you want to associate with the configuration set.
", + "GetDedicatedIpsRequest$PoolName": "The name of the IP pool that the dedicated IP address is associated with.
", + "ListOfDedicatedIpPools$member": null, + "PutDedicatedIpInPoolRequest$DestinationPoolName": "The name of the IP pool that you want to add the dedicated IP address to. You have to specify an IP pool that already exists.
" + } + }, + "PutAccountDedicatedIpWarmupAttributesRequest": { + "base": "A request to enable or disable the automatic IP address warm-up feature.
", + "refs": { + } + }, + "PutAccountDedicatedIpWarmupAttributesResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutAccountSendingAttributesRequest": { + "base": "A request to change the ability of your account to send email.
", + "refs": { + } + }, + "PutAccountSendingAttributesResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutConfigurationSetDeliveryOptionsRequest": { + "base": "A request to associate a configuration set with a dedicated IP pool.
", + "refs": { + } + }, + "PutConfigurationSetDeliveryOptionsResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutConfigurationSetReputationOptionsRequest": { + "base": "A request to enable or disable tracking of reputation metrics for a configuration set.
", + "refs": { + } + }, + "PutConfigurationSetReputationOptionsResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutConfigurationSetSendingOptionsRequest": { + "base": "A request to enable or disable the ability of Amazon SES to send emails that use a specific configuration set.
", + "refs": { + } + }, + "PutConfigurationSetSendingOptionsResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutConfigurationSetTrackingOptionsRequest": { + "base": "A request to add a custom domain for tracking open and click events to a configuration set.
", + "refs": { + } + }, + "PutConfigurationSetTrackingOptionsResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutDedicatedIpInPoolRequest": { + "base": "A request to move a dedicated IP address to a dedicated IP pool.
", + "refs": { + } + }, + "PutDedicatedIpInPoolResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutDedicatedIpWarmupAttributesRequest": { + "base": "A request to change the warm-up attributes for a dedicated IP address. This operation is useful when you want to resume the warm-up process for an existing IP address.
", + "refs": { + } + }, + "PutDedicatedIpWarmupAttributesResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutDeliverabilityDashboardOptionRequest": { + "base": "Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email using Amazon SES API v2. You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.
", + "refs": { + } + }, + "PutDeliverabilityDashboardOptionResponse": { + "base": "A response that indicates whether the Deliverability dashboard is enabled.
", + "refs": { + } + }, + "PutEmailIdentityDkimAttributesRequest": { + "base": "A request to enable or disable DKIM signing of email that you send from an email identity.
", + "refs": { + } + }, + "PutEmailIdentityDkimAttributesResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutEmailIdentityFeedbackAttributesRequest": { + "base": "A request to set the attributes that control how bounce and complaint events are processed.
", + "refs": { + } + }, + "PutEmailIdentityFeedbackAttributesResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "PutEmailIdentityMailFromAttributesRequest": { + "base": "A request to configure the custom MAIL FROM domain for a verified identity.
", + "refs": { + } + }, + "PutEmailIdentityMailFromAttributesResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "RawMessage": { + "base": "The raw email message.
", + "refs": { + "EmailContent$Raw": "The raw email message. The message has to meet the following criteria:
The message has to contain a header and a body, separated by one blank line.
All of the required header fields must be present in the message.
Each part of a multipart MIME message must be formatted properly.
If you include attachments, they must be in a file format that the Amazon SES API v2 supports.
The entire message must be Base64 encoded.
If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.
The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.
The raw email message. The message has to meet the following criteria:
The message has to contain a header and a body, separated by one blank line.
All of the required header fields must be present in the message.
Each part of a multipart MIME message must be formatted properly.
Attachments must be in a file format that the Amazon SES API v2 supports.
The entire message must be Base64 encoded.
If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.
The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.
The raw email message. The message has to meet the following criteria:
The message has to contain a header and a body, separated by one blank line.
All of the required header fields must be present in the message.
Each part of a multipart MIME message must be formatted properly.
Attachments must be in a file format that the Amazon SES API v2 supports.
The entire message must be Base64 encoded.
If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.
The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.
The name of a blacklist that an IP address was found on.
", + "refs": { + "BlacklistEntry$RblName": "The name of the blacklist that the IP address appears on.
" + } + }, + "ReportId": { + "base": "A unique string that identifies a Deliverability dashboard report.
", + "refs": { + "CreateDeliverabilityTestReportResponse$ReportId": "A unique string that identifies the predictive inbox placement test.
", + "DeliverabilityTestReport$ReportId": "A unique string that identifies the predictive inbox placement test.
", + "GetDeliverabilityTestReportRequest$ReportId": "A unique string that identifies the predictive inbox placement test.
" + } + }, + "ReportName": { + "base": "A name that helps you identify a report generated by the Deliverability dashboard.
", + "refs": { + "CreateDeliverabilityTestReportRequest$ReportName": "A unique name that helps you to identify the predictive inbox placement test when you retrieve the results.
", + "DeliverabilityTestReport$ReportName": "A name that helps you identify a predictive inbox placement test report.
" + } + }, + "ReputationOptions": { + "base": "Enable or disable collection of reputation metrics for emails that you send using this configuration set in the current AWS Region.
", + "refs": { + "CreateConfigurationSetRequest$ReputationOptions": "An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set.
", + "GetConfigurationSetResponse$ReputationOptions": "An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set.
" + } + }, + "SendEmailRequest": { + "base": "A request to send an email message.
", + "refs": { + } + }, + "SendEmailResponse": { + "base": "A unique message ID that you receive when an email is accepted for sending.
", + "refs": { + } + }, + "SendQuota": { + "base": "An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.
", + "refs": { + "GetAccountResponse$SendQuota": "An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.
" + } + }, + "SendingOptions": { + "base": "Used to enable or disable email sending for messages that use this configuration set in the current AWS Region.
", + "refs": { + "CreateConfigurationSetRequest$SendingOptions": "An object that defines whether or not Amazon SES can send email that you send using the configuration set.
", + "GetConfigurationSetResponse$SendingOptions": "An object that defines whether or not Amazon SES can send email that you send using the configuration set.
" + } + }, + "SendingPausedException": { + "base": "The message can't be sent because the account's ability to send email is currently paused.
", + "refs": { + } + }, + "SendingPoolName": { + "base": "The name of the dedicated IP pool that you want to associate with the configuration set.
", + "refs": { + "PutConfigurationSetDeliveryOptionsRequest$SendingPoolName": "The name of the dedicated IP pool that you want to associate with the configuration set.
" + } + }, + "SentLast24Hours": { + "base": null, + "refs": { + "SendQuota$SentLast24Hours": "The number of emails sent from your Amazon SES account in the current AWS Region over the past 24 hours.
" + } + }, + "SnsDestination": { + "base": "An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
", + "refs": { + "EventDestination$SnsDestination": "An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
", + "EventDestinationDefinition$SnsDestination": "An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
" + } + }, + "Subject": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$Subject": "The subject line, or title, of the email message.
" + } + }, + "Tag": { + "base": "An object that defines the tags that are associated with a resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
Tag keys and values are case sensitive.
For each associated resource, each tag key must be unique and it can have only one value.
The aws:
prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.
To remove more than one tag from the resource, append the TagKeys
parameter and argument for each additional tag to remove, separated by an ampersand. For example: /v2/email/tags?ResourceArn=ResourceArn&TagKeys=Key1&TagKeys=Key2
An array of objects that define the tags (keys and values) that you want to associate with the configuration set.
", + "CreateDedicatedIpPoolRequest$Tags": "An object that defines the tags (keys and values) that you want to associate with the pool.
", + "CreateDeliverabilityTestReportRequest$Tags": "An array of objects that define the tags (keys and values) that you want to associate with the predictive inbox placement test.
", + "CreateEmailIdentityRequest$Tags": "An array of objects that define the tags (keys and values) that you want to associate with the email identity.
", + "GetConfigurationSetResponse$Tags": "An array of objects that define the tags (keys and values) that are associated with the configuration set.
", + "GetDeliverabilityTestReportResponse$Tags": "An array of objects that define the tags (keys and values) that are associated with the predictive inbox placement test.
", + "GetEmailIdentityResponse$Tags": "An array of objects that define the tags (keys and values) that are associated with the email identity.
", + "ListTagsForResourceResponse$Tags": "An array that lists all the tags that are associated with the resource. Each tag consists of a required tag key (Key
) and an associated tag value (Value
)
A list of the tags that you want to add to the resource. A tag consists of a required tag key (Key
) and an associated tag value (Value
). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don't want a resource to have a specific tag value, don't specify a value for this parameter. If you don't specify a value, Amazon SES sets the value to an empty string.
" + } + }, + "Template": { + "base": "An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to define, save, and reuse in email messages that you send.
", + "refs": { + "EmailContent$Template": "The template to use for the email message.
" + } + }, + "TemplateArn": { + "base": null, + "refs": { + "Template$TemplateArn": "The Amazon Resource Name (ARN) of the template.
" + } + }, + "TemplateData": { + "base": null, + "refs": { + "Template$TemplateData": "An object that defines the values to use for message variables in the template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the value to use for that variable.
" + } + }, + "Timestamp": { + "base": null, + "refs": { + "BlacklistEntry$ListingTime": "The time when the blacklisting event occurred, shown in Unix time format.
", + "DailyVolume$StartDate": "The date that the DailyVolume metrics apply to, in Unix time.
", + "DeliverabilityTestReport$CreateDate": "The date and time when the predictive inbox placement test was created, in Unix time format.
", + "DomainDeliverabilityCampaign$FirstSeenDateTime": "The first time, in Unix time format, when the email message was delivered to any recipient's inbox. This value can help you determine how long it took for a campaign to deliver an email message.
", + "DomainDeliverabilityCampaign$LastSeenDateTime": "The last time, in Unix time format, when the email message was delivered to any recipient's inbox. This value can help you determine how long it took for a campaign to deliver an email message.
", + "DomainDeliverabilityTrackingOption$SubscriptionStartDate": "The date, in Unix time format, when you enabled the Deliverability dashboard for the domain.
", + "GetDeliverabilityDashboardOptionsResponse$SubscriptionExpiryDate": "The date, in Unix time format, when your current subscription to the Deliverability dashboard is scheduled to expire, if your subscription is scheduled to expire at the end of the current calendar month. This value is null if you have an active subscription that isn’t due to expire at the end of the month.
", + "GetDomainStatisticsReportRequest$StartDate": "The first day (in Unix time) that you want to obtain domain deliverability metrics for.
", + "GetDomainStatisticsReportRequest$EndDate": "The last day (in Unix time) that you want to obtain domain deliverability metrics for. The EndDate
that you specify has to be less than or equal to 30 days after the StartDate
.
The first day, in Unix time format, that you want to obtain deliverability data for.
", + "ListDomainDeliverabilityCampaignsRequest$EndDate": "The last day, in Unix time format, that you want to obtain deliverability data for. This value has to be less than or equal to 30 days after the value of the StartDate
parameter.
Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require
, messages are only delivered if a TLS connection can be established. If the value is Optional
, messages can be delivered in plain text if a TLS connection can't be established.
Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require
, messages are only delivered if a TLS connection can be established. If the value is Optional
, messages can be delivered in plain text if a TLS connection can't be established.
Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require
, messages are only delivered if a TLS connection can be established. If the value is Optional
, messages can be delivered in plain text if a TLS connection can't be established.
Too many requests have been made to the operation.
", + "refs": { + } + }, + "TrackingOptions": { + "base": "An object that defines the tracking options for a configuration set. When you use the Amazon SES API v2 to send an email, it contains an invisible image that's used to track when recipients open your email. If your email contains links, those links are changed slightly in order to track when recipients click them.
These images and links include references to a domain operated by AWS. You can optionally configure Amazon SES to use a domain that you operate for these images and links.
", + "refs": { + "CreateConfigurationSetRequest$TrackingOptions": "An object that defines the open and click tracking options for emails that you send using the configuration set.
", + "GetConfigurationSetResponse$TrackingOptions": "An object that defines the open and click tracking options for emails that you send using the configuration set.
" + } + }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, + "UpdateConfigurationSetEventDestinationRequest": { + "base": "A request to change the settings for an event destination for a configuration set.
", + "refs": { + } + }, + "UpdateConfigurationSetEventDestinationResponse": { + "base": "An HTTP 200 response if the request succeeds, or an error message if the request fails.
", + "refs": { + } + }, + "Volume": { + "base": "An object that contains information about inbox placement volume.
", + "refs": { + "DomainDeliverabilityCampaign$InboxCount": "The number of email messages that were delivered to recipients’ inboxes.
", + "DomainDeliverabilityCampaign$SpamCount": "The number of email messages that were delivered to recipients' spam or junk mail folders.
", + "DomainDeliverabilityCampaign$ProjectedVolume": "The projected number of recipients that the email message was sent to.
", + "DomainIspPlacement$InboxRawCount": "The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.
", + "DomainIspPlacement$SpamRawCount": "The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.
", + "VolumeStatistics$InboxRawCount": "The total number of emails that arrived in recipients' inboxes.
", + "VolumeStatistics$SpamRawCount": "The total number of emails that arrived in recipients' spam or junk mail folders.
", + "VolumeStatistics$ProjectedInbox": "An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.
", + "VolumeStatistics$ProjectedSpam": "An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.
" + } + }, + "VolumeStatistics": { + "base": "An object that contains information about the amount of email that was delivered to recipients.
", + "refs": { + "DailyVolume$VolumeStatistics": "An object that contains inbox placement metrics for a specific day in the analysis period.
", + "OverallVolume$VolumeStatistics": "An object that contains information about the numbers of messages that arrived in recipients' inboxes and junk mail folders.
" + } + }, + "WarmupStatus": { + "base": "The warmup status of a dedicated IP.
", + "refs": { + "DedicatedIp$WarmupStatus": "The warm-up status of a dedicated IP address. The status can have one of the following values:
IN_PROGRESS
– The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.
DONE
– The dedicated IP warm-up process is complete, and the IP address is ready to use.
Creates a new maintenance window.
The value you specify for Duration
determines the specific end time for the maintenance window based on the time it begins. No maintenance window tasks are permitted to start after the resulting endtime minus the number of hours you specify for Cutoff
. For example, if the maintenance window starts at 3 PM, the duration is three hours, and the value you specify for Cutoff
is one hour, no maintenance window tasks can start after 5 PM.
Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.
Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.
", "CreatePatchBaseline": "Creates a patch baseline.
For information about valid key and value pairs in PatchFilters
for each supported operating system type, see PatchFilter.
Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync.
By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.
", + "CreateResourceDataSync": "A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination
and SyncFromSource
.
You can configure Systems Manager Inventory to use the SyncToDestination
type to synchronize Inventory data from multiple AWS Regions to a single Amazon S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.
You can configure Systems Manager Explorer to use the SyncToDestination
type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single Amazon S3 bucket. You can also configure Explorer to use the SyncFromSource
type. This type synchronizes OpsItems and OpsData from multiple AWS accounts and Regions by using AWS Organizations. For more information, see Setting Up Explorer to Display Data from Multiple Accounts and Regions in the AWS Systems Manager User Guide.
A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.
By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.
Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation does not de-register managed instances. You must manually de-register managed instances.
", "DeleteAssociation": "Disassociates the specified Systems Manager document from the specified instance.
When you disassociate a document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.
", "DeleteDocument": "Deletes the Systems Manager document and all instance associations to the document.
Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.
", @@ -21,7 +21,7 @@ "DeleteParameter": "Delete a parameter from the system.
", "DeleteParameters": "Delete a list of parameters.
", "DeletePatchBaseline": "Deletes a patch baseline.
", - "DeleteResourceDataSync": "Deletes a Resource Data Sync configuration. After the configuration is deleted, changes to inventory data on managed instances are no longer synced with the target Amazon S3 bucket. Deleting a sync configuration does not delete data in the target Amazon S3 bucket.
", + "DeleteResourceDataSync": "Deletes a Resource Data Sync configuration. After the configuration is deleted, changes to data on managed instances are no longer synced to or from the target. Deleting a sync configuration does not delete data.
", "DeregisterManagedInstance": "Removes the server or virtual machine from the list of registered servers. You can reregister the instance again at any time. If you don't plan to use Run Command on the server, we suggest uninstalling SSM Agent first.
", "DeregisterPatchBaselineForPatchGroup": "Removes a patch group from a patch baseline.
", "DeregisterTargetFromMaintenanceWindow": "Removes a target from a maintenance window.
", @@ -52,7 +52,7 @@ "DescribeMaintenanceWindows": "Retrieves the maintenance windows in an AWS account.
", "DescribeMaintenanceWindowsForTarget": "Retrieves information about the maintenance window targets or tasks that an instance is associated with.
", "DescribeOpsItems": "Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.
Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.
", - "DescribeParameters": "Get information about a parameter.
Request results are returned on a best-effort basis. If you specify MaxResults
in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults
. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken
. You can specify the NextToken
in a subsequent call to get the next set of results.
Get information about a parameter.
Request results are returned on a best-effort basis. If you specify MaxResults
in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults
. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken
. You can specify the NextToken
in a subsequent call to get the next set of results.
Lists the patch baselines in your AWS account.
", "DescribePatchGroupState": "Returns high-level aggregated patch compliance state for a patch group.
", "DescribePatchGroups": "Lists all patch groups that have been registered with patch baselines.
", @@ -76,7 +76,7 @@ "GetParameter": "Get information about a parameter by using the parameter name. Don't confuse this API action with the GetParameters API action.
", "GetParameterHistory": "Query a list of all parameters used by the AWS account.
", "GetParameters": "Get details of a parameter. Don't confuse this API action with the GetParameter API action.
", - "GetParametersByPath": "Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters in the AWS Systems Manager User Guide.
Request results are returned on a best-effort basis. If you specify MaxResults
in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults
. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken
. You can specify the NextToken
in a subsequent call to get the next set of results.
This API action doesn't support filtering by tags.
Retrieve information about one or more parameters in a specific hierarchy.
Request results are returned on a best-effort basis. If you specify MaxResults
in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults
. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken
. You can specify the NextToken
in a subsequent call to get the next set of results.
Retrieves information about a patch baseline.
", "GetPatchBaselineForPatchGroup": "Retrieves the patch baseline that should be used for the specified patch group.
", "GetServiceSetting": " ServiceSetting
is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.
Services map a SettingId
object to a setting value. AWS services teams define the default value for a SettingId
. You can't create a new SettingId
, but you can overwrite the default value if you have the ssm:UpdateServiceSetting
permission for the setting. Use the UpdateServiceSetting API action to change the default setting. Or use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.
Query the current service setting for the account.
", @@ -528,6 +528,12 @@ "AttachmentContent$HashType": "The hash algorithm used to calculate the hash value.
" } }, + "AttachmentIdentifier": { + "base": null, + "refs": { + "AttachmentsSource$Name": "The name of the document attachment file.
" + } + }, "AttachmentInformation": { "base": "An attribute of an attachment, such as the attachment name.
", "refs": { @@ -554,7 +560,7 @@ } }, "AttachmentsSource": { - "base": "A key and value pair that identifies the location of an attachment to a document.
", + "base": "Identifying information about a document attachment, including the file name and a key-value pair that identifies the location of an attachment to a document.
", "refs": { "AttachmentsSourceList$member": null } @@ -562,7 +568,7 @@ "AttachmentsSourceKey": { "base": null, "refs": { - "AttachmentsSource$Key": "The key of a key and value pair that identifies the location of an attachment to a document.
" + "AttachmentsSource$Key": "The key of a key-value pair that identifies the location of an attachment to a document.
" } }, "AttachmentsSourceList": { @@ -581,7 +587,7 @@ "AttachmentsSourceValues": { "base": null, "refs": { - "AttachmentsSource$Values": "The URL of the location of a document attachment, such as the URL of an Amazon S3 bucket.
" + "AttachmentsSource$Values": "The value of a key-value pair that identifies the location of an attachment to a document. The format is the URL of the location of a document attachment, such as the URL of an Amazon S3 bucket.
" } }, "AttributeName": { @@ -907,7 +913,7 @@ "CommandFilterList": { "base": null, "refs": { - "ListCommandInvocationsRequest$Filters": "(Optional) One or more filters. Use a filter to return a more specific list of results.
", + "ListCommandInvocationsRequest$Filters": "(Optional) One or more filters. Use a filter to return a more specific list of results. Note that the DocumentName
filter is not supported for ListCommandInvocations.
(Optional) One or more filters. Use a filter to return a more specific list of results.
" } }, @@ -2700,7 +2706,7 @@ "ListCommandsRequest$InstanceId": "(Optional) Lists commands issued against this instance ID.
", "ListInventoryEntriesRequest$InstanceId": "The instance ID for which you want inventory information.
", "ListInventoryEntriesResult$InstanceId": "The instance ID targeted by the request to query inventory information.
", - "PutInventoryRequest$InstanceId": "One or more instance IDs where you want to add or update inventory items.
", + "PutInventoryRequest$InstanceId": "An instance ID where you want to add or update inventory items.
", "UpdateAssociationStatusRequest$InstanceId": "The ID of the instance.
" } }, @@ -4443,7 +4449,8 @@ "OpsDataTypeName": { "base": null, "refs": { - "OpsAggregator$TypeName": "The data type name to use for viewing counts of OpsItems.
" + "OpsAggregator$TypeName": "The data type name to use for viewing counts of OpsItems.
", + "OpsResultAttribute$TypeName": "Name of the data type. Valid value: AWS:OpsItem, AWS:EC2InstanceInformation, AWS:OpsItemTrendline, or AWS:ComplianceSummary.
" } }, "OpsEntity": { @@ -4464,6 +4471,12 @@ "OpsEntityItemMap$value": null } }, + "OpsEntityItemCaptureTime": { + "base": null, + "refs": { + "OpsEntityItem$CaptureTime": "The time OpsItem data was captured.
" + } + }, "OpsEntityItemEntry": { "base": null, "refs": { @@ -4542,6 +4555,15 @@ "refs": { } }, + "OpsItemCategory": { + "base": null, + "refs": { + "CreateOpsItemRequest$Category": "Specify a category to assign to an OpsItem.
", + "OpsItem$Category": "An OpsItem category. Category options include: Availability, Cost, Performance, Recovery, Security.
", + "OpsItemSummary$Category": "A list of OpsItems by category.
", + "UpdateOpsItemRequest$Category": "Specify a new category for an OpsItem.
" + } + }, "OpsItemDataKey": { "base": null, "refs": { @@ -4685,6 +4707,15 @@ "UpdateOpsItemRequest$Priority": "The importance of this OpsItem in relation to other OpsItems in the system.
" } }, + "OpsItemSeverity": { + "base": null, + "refs": { + "CreateOpsItemRequest$Severity": "Specify a severity to assign to an OpsItem.
", + "OpsItem$Severity": "The severity of the OpsItem. Severity options range from 1 to 4.
", + "OpsItemSummary$Severity": "A list of OpsItems by severity.
", + "UpdateOpsItemRequest$Severity": "Specify a new severity for an OpsItem.
" + } + }, "OpsItemSource": { "base": null, "refs": { @@ -4722,6 +4753,18 @@ "UpdateOpsItemRequest$Title": "A short heading that describes the nature of the OpsItem and the impacted resource.
" } }, + "OpsResultAttribute": { + "base": "The OpsItem data type to return.
", + "refs": { + "OpsResultAttributeList$member": null + } + }, + "OpsResultAttributeList": { + "base": null, + "refs": { + "GetOpsSummaryRequest$ResultAttributes": "The OpsItem data type to return.
" + } + }, "OutputSource": { "base": "Information about the source where the association execution details are stored.
", "refs": { @@ -4921,7 +4964,7 @@ } }, "ParameterStringFilter": { - "base": "One or more filters. Use a filter to return a more specific list of results.
The Name
and Tier
filter keys can't be used with the GetParametersByPath API action. Also, the Label
filter key can't be used with the DescribeParameters API action.
One or more filters. Use a filter to return a more specific list of results.
The ParameterStringFilter
object is used by the DescribeParameters and GetParametersByPath API actions. However, not all of the pattern values listed for Key
can be used with both actions.
For DescribeActions
, all of the listed patterns are valid, with the exception of Label
.
For GetParametersByPath
, the following patterns listed for Key
are not valid: Name
, Path
, and Tier
.
For examples of CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager Parameters in the AWS Systems Manager User Guide.
Filters to limit the request results.
", - "GetParametersByPathRequest$ParameterFilters": "Filters to limit the request results.
You can't filter using the parameter name.
Filters to limit the request results.
" } }, "ParameterStringFilterValue": { @@ -4954,7 +4997,7 @@ "ParameterStringQueryOption": { "base": null, "refs": { - "ParameterStringFilter$Option": "Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.
" + "ParameterStringFilter$Option": "For all filters used with DescribeParameters, valid options include Equals
and BeginsWith
. The Name
filter additionally supports the Contains
option. (Exception: For filters using the key Path
, valid options include Recursive
and OneLevel
.)
For filters used with GetParametersByPath, valid options include Equals
and BeginsWith
. (Exception: For filters using the key Label
, the only valid option is Equals
.)
One or more filters. Use a filter to return a more specific list of results.
" + "DescribeParametersRequest$Filters": "This data type is deprecated. Instead, use ParameterFilters
.
Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from AWS Organizations or, if an AWS Organization is not present, from multiple AWS Regions.
", + "refs": { + "ResourceDataSyncSource$AwsOrganizationsSource": "The field name in SyncSource
for the ResourceDataSyncAwsOrganizationsSource
type.
The field name in SyncSource
for the ResourceDataSyncAwsOrganizationsSource
type.
You have exceeded the allowed maximum sync configurations.
", "refs": { @@ -5701,6 +5751,13 @@ "ResourceDataSyncItem$SyncCreatedTime": "The date and time the configuration was created (UTC).
" } }, + "ResourceDataSyncIncludeFutureRegions": { + "base": null, + "refs": { + "ResourceDataSyncSource$IncludeFutureRegions": "Whether to automatically synchronize and aggregate data from new AWS Regions when those Regions come online.
", + "ResourceDataSyncSourceWithState$IncludeFutureRegions": "Whether to automatically synchronize and aggregate data from new AWS Regions when those Regions come online.
" + } + }, "ResourceDataSyncInvalidConfigurationException": { "base": "The specified sync configuration is invalid.
", "refs": { @@ -5718,11 +5775,18 @@ "ListResourceDataSyncResult$ResourceDataSyncItems": "A list of your current Resource Data Sync configurations and their statuses.
" } }, + "ResourceDataSyncLastModifiedTime": { + "base": null, + "refs": { + "ResourceDataSyncItem$SyncLastModifiedTime": "The date and time the resource data sync was changed.
" + } + }, "ResourceDataSyncName": { "base": null, "refs": { "CreateResourceDataSyncRequest$SyncName": "A name for the configuration.
", "DeleteResourceDataSyncRequest$SyncName": "The name of the configuration to delete.
", + "GetOpsSummaryRequest$SyncName": "Specify the name of a resource data sync to get.
", "ResourceDataSyncAlreadyExistsException$SyncName": null, "ResourceDataSyncItem$SyncName": "The name of the Resource Data Sync.
", "ResourceDataSyncNotFoundException$SyncName": null @@ -5733,6 +5797,30 @@ "refs": { } }, + "ResourceDataSyncOrganizationSourceType": { + "base": null, + "refs": { + "ResourceDataSyncAwsOrganizationsSource$OrganizationSourceType": "If an AWS Organization is present, this is either OrganizationalUnits
or EntireOrganization
. For OrganizationalUnits
, the data is aggregated from a set of organization units. For EntireOrganization
, the data is aggregated from the entire AWS Organization.
The AWS Organizations organizational unit data source for the sync.
", + "refs": { + "ResourceDataSyncOrganizationalUnitList$member": null + } + }, + "ResourceDataSyncOrganizationalUnitId": { + "base": null, + "refs": { + "ResourceDataSyncOrganizationalUnit$OrganizationalUnitId": "The AWS Organization unit ID data source for the sync.
" + } + }, + "ResourceDataSyncOrganizationalUnitList": { + "base": null, + "refs": { + "ResourceDataSyncAwsOrganizationsSource$OrganizationalUnits": "The AWS Organizations organization units included in the sync.
" + } + }, "ResourceDataSyncS3BucketName": { "base": null, "refs": { @@ -5764,6 +5852,54 @@ "ResourceDataSyncS3Destination$Region": "The AWS Region with the Amazon S3 bucket targeted by the Resource Data Sync.
" } }, + "ResourceDataSyncSource": { + "base": "Information about the source of the data included in the resource data sync.
", + "refs": { + "CreateResourceDataSyncRequest$SyncSource": "Specify information about the data sources to synchronize.
" + } + }, + "ResourceDataSyncSourceRegion": { + "base": null, + "refs": { + "ResourceDataSyncSourceRegionList$member": null + } + }, + "ResourceDataSyncSourceRegionList": { + "base": null, + "refs": { + "ResourceDataSyncSource$SourceRegions": "The SyncSource
AWS Regions included in the resource data sync.
The SyncSource
AWS Regions included in the resource data sync.
The type of data source for the resource data sync. SourceType
is either AwsOrganizations
(if an organization is present in AWS Organizations) or singleAccountMultiRegions
.
The type of data source for the resource data sync. SourceType
is either AwsOrganizations
(if an organization is present in AWS Organizations) or singleAccountMultiRegions
.
The data type name for including resource data sync state. There are four sync states:
OrganizationNotExists
(Your organization doesn't exist)
NoPermissions
(The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.)
InvalidOrganizationalUnit
(You specified or selected an invalid unit in the resource data sync configuration.)
TrustedAccessDisabled
(You disabled Systems Manager access in the organization in AWS Organizations.)
Information about the source where the data was synchronized.
" + } + }, + "ResourceDataSyncState": { + "base": null, + "refs": { + "ResourceDataSyncSourceWithState$State": "The data type name for including resource data sync state. There are four sync states:
OrganizationNotExists
: Your organization doesn't exist.
NoPermissions
: The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.
InvalidOrganizationalUnit
: You specified or selected an invalid unit in the resource data sync configuration.
TrustedAccessDisabled
: You disabled Systems Manager access in the organization in AWS Organizations.
Specify SyncToDestination
to create a resource data sync that synchronizes data from multiple AWS Regions to an Amazon S3 bucket. Specify SyncFromSource
to synchronize data from multiple AWS accounts and Regions, as listed in AWS Organizations.
Specify the type of resource data sync to delete.
", + "ListResourceDataSyncRequest$SyncType": "View a list of resource data syncs according to the sync type. Specify SyncToDestination
to view resource data syncs that synchronize data to an Amazon S3 buckets. Specify SyncFromSource
to view resource data syncs from AWS Organizations or from multiple AWS Regions.
The type of resource data sync. If SyncType
is SyncToDestination
, then the resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType
is SyncFromSource
then the resource data sync synchronizes data from AWS Organizations or from multiple AWS Regions.
The ID of an OpsItem related to the current OpsItem.
", "ResourceDataSyncCountExceededException$Message": null, "ResourceDataSyncInvalidConfigurationException$Message": null, + "ResourceDataSyncNotFoundException$Message": null, "ResourceInUseException$Message": null, "ResourceLimitExceededException$Message": null, "ServiceSetting$LastModifiedUser": "The ARN of the last modified user. This field is populated only if the setting value was overwritten.
", @@ -6465,7 +6602,7 @@ } }, "Target": { - "base": "An array of search criteria that targets instances using a Key,Value combination that you specify.
Supported formats include the following.
Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2
Key=tag-key,Values=my-tag-key-1,my-tag-key-2
(Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name
(Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
For example:
Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE
Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3
Key=tag-key,Values=Name,Instance-Type,CostCenter
(Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup
(Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
For information about how to send commands that target instances using Key,Value
parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.
An array of search criteria that targets instances using a Key,Value combination that you specify.
Supported formats include the following.
Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2
Key=tag-key,Values=my-tag-key-1,my-tag-key-2
(Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name
(Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
For example:
Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE
Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3
Key=tag-key,Values=Name,Instance-Type,CostCenter
(Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup
This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.
(Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
This example demonstrates how to target only Amazon EC2 instances and VPCs in your maintenance window.
(State Manager association targets only) Key=InstanceIds,Values=*
This example demonstrates how to target all managed instances in the AWS Region where the association was created.
For information about how to send commands that target instances using Key,Value
parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.
Creates a volume on a specified gateway. This operation is only supported in the stored volume gateway type.
The size of the volume to create is inferred from the disk size. You can choose to preserve existing data on the disk, create volume from an existing snapshot, or create an empty volume. If you choose to create an empty gateway volume, then any existing data on the disk is erased.
In the request you must specify the gateway and the disk information on which you are creating the volume. In response, the gateway creates the volume and returns volume information such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.
", "CreateTapeWithBarcode": "Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and can not be reused if it has already been used on a tape . This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.
Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.
Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.
Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.
Deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request.
", - "DeleteChapCredentials": "Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair.
", + "DeleteBandwidthRateLimit": "Deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request. This operation is supported for the stored volume, cached volume and tape gateway types.
", + "DeleteChapCredentials": "Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair. This operation is supported in volume and tape gateway types.
", "DeleteFileShare": "Deletes a file share from a file gateway. This operation is only supported for file gateways.
", "DeleteGateway": "Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.
After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.
You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription. If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.
Deletes a snapshot of a volume.
You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule
request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.
To list or delete a snapshot, you must use the Amazon EC2 API. in Amazon Elastic Compute Cloud API Reference.
Deletes the specified virtual tape. This operation is only supported in the tape gateway type.
", "DeleteTapeArchive": "Deletes the specified virtual tape from the virtual tape shelf (VTS). This operation is only supported in the tape gateway type.
", "DeleteVolume": "Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.
Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.
In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.
", - "DescribeBandwidthRateLimit": "Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect.
This operation only returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.
", + "DescribeAvailabilityMonitorTest": "Returns information about the most recent High Availability monitoring test that was performed on the host in a cluster. If a test isn't performed, the status and start time in the response would be null.
", + "DescribeBandwidthRateLimit": "Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume and tape gateway types.'
This operation only returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.
", "DescribeCache": "Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape and file gateway types.
The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.
", "DescribeCachediSCSIVolumes": "Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.
The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).
", - "DescribeChapCredentials": "Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair.
", + "DescribeChapCredentials": "Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair. This operation is supported in the volume and tape gateway types.
", "DescribeGatewayInformation": "Returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.
", "DescribeMaintenanceStartTime": "Returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone.
", "DescribeNFSFileShares": "Gets a description for one or more Network File System (NFS) file shares from a file gateway. This operation is only supported for file gateways.
", @@ -44,29 +45,30 @@ "DescribeUploadBuffer": "Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume and tape gateway types.
The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.
", "DescribeVTLDevices": "Returns a description of virtual tape library (VTL) devices for the specified tape gateway. In the response, AWS Storage Gateway returns VTL device information.
This operation is only supported in the tape gateway type.
", "DescribeWorkingStorage": "Returns information about the working storage of a gateway. This operation is only supported in the stored volumes gateway type. This operation is deprecated in cached volumes API version (20120630). Use DescribeUploadBuffer instead.
Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to add upload buffer to a stored volume gateway.
The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.
", - "DetachVolume": "Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.
", + "DetachVolume": "Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance. This operation is only supported in the volume gateway type.
", "DisableGateway": "Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.
Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.
Once a gateway is disabled it cannot be enabled.
Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol.
", "ListFileShares": "Gets a list of the file shares for a specific file gateway, or the list of file shares that belong to the calling user account. This operation is only supported for file gateways.
", "ListGateways": "Lists gateways owned by an AWS account in an AWS Region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).
By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.
If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.
", "ListLocalDisks": "Returns a list of the gateway's local disks. To specify which gateway to describe, you use the Amazon Resource Name (ARN) of the gateway in the body of the request.
The request returns a list of all disks, specifying which are configured as working storage, cache storage, or stored volume or not configured at all. The response includes a DiskStatus
field. This field can have a value of present (the disk is available to use), missing (the disk is no longer connected to the gateway), or mismatch (the disk node is occupied by a disk that has incorrect metadata or the disk content is corrupted).
Lists the tags that have been added to the specified resource. This operation is only supported in the cached volume, stored volume and tape gateway type.
", + "ListTagsForResource": "Lists the tags that have been added to the specified resource. This operation is supported in storage gateways of all types.
", "ListTapes": "Lists virtual tapes in your virtual tape library (VTL) and your virtual tape shelf (VTS). You specify the tapes to list by specifying one or more tape Amazon Resource Names (ARNs). If you don't specify a tape ARN, the operation lists all virtual tapes in both your VTL and VTS.
This operation supports pagination. By default, the operation returns a maximum of up to 100 tapes. You can optionally specify the Limit
parameter in the body to limit the number of tapes in the response. If the number of tapes returned in the response is truncated, the response includes a Marker
element that you can use in your subsequent request to retrieve the next set of tapes. This operation is only supported in the tape gateway type.
Lists iSCSI initiators that are connected to a volume. You can use this operation to determine whether a volume is being used or not. This operation is only supported in the cached volume and stored volume gateway types.
", "ListVolumeRecoveryPoints": "Lists the recovery points for a specified gateway. This operation is only supported in the cached volume gateway type.
Each cache volume has one recovery point. A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot or clone a new cached volume from a source volume. To create a snapshot from a volume recovery point use the CreateSnapshotFromVolumeRecoveryPoint operation.
", "ListVolumes": "Lists the iSCSI stored volumes of a gateway. Results are sorted by volume ARN. The response includes only the volume ARNs. If you want additional volume information, use the DescribeStorediSCSIVolumes or the DescribeCachediSCSIVolumes API.
The operation supports pagination. By default, the operation returns a maximum of up to 100 volumes. You can optionally specify the Limit
field in the body to limit the number of volumes in the response. If the number of volumes returned in the response is truncated, the response includes a Marker field. You can use this Marker value in your subsequent request to retrieve the next set of volumes. This operation is only supported in the cached volume and stored volume gateway types.
Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.
AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.
For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).
", - "RefreshCache": "Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.
When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through an CloudWatch event when your RefreshCache
operation completes.
Removes one or more tags from the specified resource. This operation is only supported in the cached volume, stored volume and tape gateway types.
", + "RefreshCache": "Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.
When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache
operation completes.
Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting Notified About File Operations.
If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException
error because too many requests were sent to the server.
For more information, see \"https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification\".
", + "RemoveTagsFromResource": "Removes one or more tags from the specified resource. This operation is supported in storage gateways of all types.
", "ResetCache": "Resets all cache disks that have encountered a error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters a error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point you can reconfigure the disks as cache disks. This operation is only supported in the cached volume and tape types.
If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.
Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a tape gateway. Virtual tapes archived in the VTS are not associated with any gateway. However after a tape is retrieved, it is associated with a gateway, even though it is also listed in the VTS, that is, archive. This operation is only supported in the tape gateway type.
Once a tape is successfully retrieved to a gateway, it cannot be retrieved again to another gateway. You must archive the tape again before you can retrieve it to another gateway. This operation is only supported in the tape gateway type.
", "RetrieveTapeRecoveryPoint": "Retrieves the recovery point for the specified virtual tape. This operation is only supported in the tape gateway type.
A recovery point is a point in time view of a virtual tape at which all the data on the tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.
The virtual tape can be retrieved to only one gateway. The retrieved tape is read-only. The virtual tape can be retrieved to only a tape gateway. There is no charge for retrieving recovery points.
Sets the password for your VM local console. When you log in to the local console for the first time, you log in to the VM with the default credentials. We recommend that you set a new password. You don't need to know the default password to set a new password.
", "SetSMBGuestPassword": "Sets the password for the guest user smbguest
. The smbguest
user is the user when the authentication method for the file share is set to GuestAccess
.
Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.
The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.
If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.
After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.
When you make a shutdown request, you will get a 200 OK
success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.
If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.
", + "StartAvailabilityMonitorTest": "Start a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test and that a successful response only indicates that the test was started. It doesn't indicate that the test passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest
API.
Starting this test will cause your gateway to go offline for a brief period.
Starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can then make other API calls, your applications can read from or write to the gateway's storage volumes and you will be able to take snapshot backups.
When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.
To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.
", - "UpdateBandwidthRateLimit": "Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains.
By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.
To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.
", - "UpdateChapCredentials": "Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it.
When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.
Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume and tape gateway types.
By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.
To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.
", + "UpdateChapCredentials": "Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it. This operation is supported in the volume and tape gateway types.
When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.
Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.
For Gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.
Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.
When you make this request, you get a 200 OK
success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING
state.
A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings and Customizing Your Linux iSCSI Settings, respectively.
Updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone.
", @@ -93,6 +95,13 @@ "ActivateGatewayInput$ActivationKey": "Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey
. It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ActivateGateway
API call determine the actual configuration of your gateway.
For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html in the Storage Gateway User Guide.
" } }, + "ActiveDirectoryStatus": { + "base": null, + "refs": { + "DescribeSMBSettingsOutput$ActiveDirectoryStatus": "Indicates the status of a gateway that is a member of the Active Directory domain.
ACCESS_DENIED: Indicates that the JoinDomain
operation failed due to an authentication error.
DETACHED: Indicates that the gateway is not joined to a domain.
JOINED: Indicates that the gateway has successfully joined a domain.
JOINING: Indicates that a JoinDomain
operation is in progress.
NETWORK_ERROR: Indicates that JoinDomain
operation failed due to a network or connectivity error.
TIMEOUT: Indicates that the JoinDomain
operation failed because the operation didn't complete within the allotted time.
UNKNOWN_ERROR: Indicates that the JoinDomain
operation failed due to another type of error.
Indicates the status of the gateway as a member of the Active Directory domain.
ACCESS_DENIED: Indicates that the JoinDomain
operation failed due to an authentication error.
DETACHED: Indicates that the gateway is not joined to a domain.
JOINED: Indicates that the gateway has successfully joined a domain.
JOINING: Indicates that a JoinDomain
operation is in progress.
NETWORK_ERROR: Indicates that JoinDomain
operation failed due to a network or connectivity error.
TIMEOUT: Indicates that the JoinDomain
operation failed because the operation didn't complete within the allotted time.
UNKNOWN_ERROR: Indicates that the JoinDomain
operation failed due to another type of error.
The status of the High Availability monitoring test. If a test hasn't been performed, the value of this field is null.
" + } + }, "BandwidthDownloadRateLimit": { "base": null, "refs": { @@ -282,7 +297,7 @@ "CloudWatchLogGroupARN": { "base": null, "refs": { - "DescribeGatewayInformationOutput$CloudWatchLogGroupARN": "The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the gateway.
", + "DescribeGatewayInformationOutput$CloudWatchLogGroupARN": "The Amazon Resource Name (ARN) of the Amazon CloudWatch Log Group that is used to monitor events in the gateway.
", "UpdateGatewayInformationInput$CloudWatchLogGroupARN": "The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.
For more information, see What Is Amazon CloudWatch Logs?.
" } }, @@ -467,6 +482,16 @@ "refs": { } }, + "DescribeAvailabilityMonitorTestInput": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityMonitorTestOutput": { + "base": null, + "refs": { + } + }, "DescribeBandwidthRateLimitInput": { "base": "A JSON object containing the of the gateway.
", "refs": { @@ -737,7 +762,7 @@ "DomainUserName": { "base": null, "refs": { - "JoinDomainInput$UserName": "Sets the user name of user who has permission to add the gateway to the Active Directory domain.
" + "JoinDomainInput$UserName": "Sets the user name of user who has permission to add the gateway to the Active Directory domain. The domain user account should be enabled to join computers to the domain. For example, you can use the domain administrator account or an account with delegated permissions to join computers to the domain.
" } }, "DomainUserPassword": { @@ -900,6 +925,8 @@ "DeleteGatewayInput$GatewayARN": null, "DeleteGatewayOutput$GatewayARN": null, "DeleteTapeInput$GatewayARN": "The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.
", + "DescribeAvailabilityMonitorTestInput$GatewayARN": null, + "DescribeAvailabilityMonitorTestOutput$GatewayARN": null, "DescribeBandwidthRateLimitInput$GatewayARN": null, "DescribeBandwidthRateLimitOutput$GatewayARN": null, "DescribeCacheInput$GatewayARN": null, @@ -944,6 +971,8 @@ "SetSMBGuestPasswordOutput$GatewayARN": null, "ShutdownGatewayInput$GatewayARN": null, "ShutdownGatewayOutput$GatewayARN": null, + "StartAvailabilityMonitorTestInput$GatewayARN": null, + "StartAvailabilityMonitorTestOutput$GatewayARN": null, "StartGatewayInput$GatewayARN": null, "StartGatewayOutput$GatewayARN": null, "TapeArchive$RetrievedTo": "The Amazon Resource Name (ARN) of the tape gateway that the virtual tape is being retrieved to.
The virtual tape is retrieved from the virtual tape shelf (VTS).
", @@ -1030,6 +1059,12 @@ "Hosts$member": null } }, + "HostEnvironment": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$HostEnvironment": "The type of hypervisor environment used by the host.
" + } + }, "Hosts": { "base": null, "refs": { @@ -1548,6 +1583,16 @@ "UpdateNFSFileShareInput$Squash": "The user mapped to anonymous user. Valid options are the following:
RootSquash
- Only root is mapped to anonymous user.
NoSquash
- No one is mapped to anonymous user
AllSquash
- Everyone is mapped to anonymous user.
A JSON object containing the of the gateway to start.
", "refs": { @@ -1804,12 +1849,19 @@ "Time": { "base": null, "refs": { + "DescribeAvailabilityMonitorTestOutput$StartTime": "The time the High Availability monitoring test was started. If a test hasn't been performed, the value of this field is null.
", "Tape$TapeCreatedDate": "The date the virtual tape was created.
", "TapeArchive$TapeCreatedDate": "The date the virtual tape was created.
", "TapeArchive$CompletionTime": "The time that the archiving of the virtual tape was completed.
The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.
", "TapeRecoveryPointInfo$TapeRecoveryPointTime": "The time when the point-in-time view of the virtual tape was replicated for later recovery.
The default time stamp format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.
" } }, + "TimeoutInSeconds": { + "base": null, + "refs": { + "JoinDomainInput$TimeoutInSeconds": "Specifies the time in seconds, in which the JoinDomain
operation must complete. The default is 20 seconds.
A JSON object containing one or more of the following fields:
UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec
UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec
Determines whether the transcription job uses speaker recognition to identify different speakers in the input audio. Speaker recognition labels individual speakers in the audio file. If you set the ShowSpeakerLabels
field to true, you must also set the maximum number of speaker labels MaxSpeakerLabels
field.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in the same request. If you set both, your request returns a BadRequestException
.
Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.
Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in the same request. If you set both, your request returns a BadRequestException
.
Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.
Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in the same request. If you set both, your request returns a BadRequestException
.
Determines whether the transcription contains alternative transcriptions. If you set the ShowAlternatives
field to true, you must also set the maximum number of alternatives to return in the MaxAlternatives
field.
The number of alternative transcriptions that the service should return. If you specify the MaxAlternatives
field, you must set the ShowAlternatives
field to true.
Deletes the specified IP access control group.
You cannot delete an IP access control group that is associated with a directory.
", "DeleteTags": "Deletes the specified tags from the specified WorkSpaces resource.
", "DeleteWorkspaceImage": "Deletes the specified image from your account. To delete an image, you must first delete any bundles that are associated with the image and un-share the image if it is shared with other accounts.
", - "DescribeAccount": "Retrieves a list that describes the configuration of bring your own license (BYOL) for the specified account.
", - "DescribeAccountModifications": "Retrieves a list that describes modifications to the configuration of bring your own license (BYOL) for the specified account.
", + "DeregisterWorkspaceDirectory": "Deregisters the specified directory. This operation is asynchronous and returns before the WorkSpace directory is deregistered. If any WorkSpaces are registered to this directory, you must remove them before you can deregister the directory.
", + "DescribeAccount": "Retrieves a list that describes the configuration of Bring Your Own License (BYOL) for the specified account.
", + "DescribeAccountModifications": "Retrieves a list that describes modifications to the configuration of Bring Your Own License (BYOL) for the specified account.
", "DescribeClientProperties": "Retrieves a list that describes one or more specified Amazon WorkSpaces clients.
", "DescribeIpGroups": "Describes one or more of your IP access control groups.
", "DescribeTags": "Describes the specified tags for the specified WorkSpaces resource.
", "DescribeWorkspaceBundles": "Retrieves a list that describes the available WorkSpace bundles.
You can filter the results using either bundle ID or owner, but not both.
", - "DescribeWorkspaceDirectories": "Describes the available AWS Directory Service directories that are registered with Amazon WorkSpaces.
", + "DescribeWorkspaceDirectories": "Describes the available directories that are registered with Amazon WorkSpaces.
", "DescribeWorkspaceImages": "Retrieves a list that describes one or more specified images, if the image identifiers are provided. Otherwise, all images in the account are described.
", "DescribeWorkspaceSnapshots": "Describes the snapshots for the specified WorkSpace.
", "DescribeWorkspaces": "Describes the specified WorkSpaces.
You can filter the results by using the bundle identifier, directory identifier, or owner, but you can specify only one filter at a time.
", "DescribeWorkspacesConnectionStatus": "Describes the connection status of the specified WorkSpaces.
", "DisassociateIpGroups": "Disassociates the specified IP access control group from the specified directory.
", - "ImportWorkspaceImage": "Imports the specified Windows 7 or Windows 10 bring your own license (BYOL) image into Amazon WorkSpaces. The image must be an already licensed EC2 image that is in your AWS account, and you must own the image.
", - "ListAvailableManagementCidrRanges": "Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable bring your own license (BYOL).
The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.
", - "ModifyAccount": "Modifies the configuration of bring your own license (BYOL) for the specified account.
", + "ImportWorkspaceImage": "Imports the specified Windows 7 or Windows 10 Bring Your Own License (BYOL) image into Amazon WorkSpaces. The image must be an already licensed EC2 image that is in your AWS account, and you must own the image.
", + "ListAvailableManagementCidrRanges": "Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).
The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.
", + "ModifyAccount": "Modifies the configuration of Bring Your Own License (BYOL) for the specified account.
", "ModifyClientProperties": "Modifies the properties of the specified Amazon WorkSpaces clients.
", + "ModifySelfservicePermissions": "Modifies the self-service WorkSpace management capabilities for your users. For more information, see Enable Self-Service WorkSpace Management Capabilities for Your Users.
", + "ModifyWorkspaceAccessProperties": "Specifies which devices and operating systems users can use to access their Workspaces. For more information, see Control Device Access.
", + "ModifyWorkspaceCreationProperties": "Modify the default properties used to create WorkSpaces.
", "ModifyWorkspaceProperties": "Modifies the specified WorkSpace properties.
", "ModifyWorkspaceState": "Sets the state of the specified WorkSpace.
To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE
. WorkSpaces in this state do not respond to requests to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE
state.
Reboots the specified WorkSpaces.
You cannot reboot a WorkSpace unless its state is AVAILABLE
or UNHEALTHY
.
This operation is asynchronous and returns before the WorkSpaces have rebooted.
", "RebuildWorkspaces": "Rebuilds the specified WorkSpace.
You cannot rebuild a WorkSpace unless its state is AVAILABLE
, ERROR
, or UNHEALTHY
.
Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.
This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.
", + "RegisterWorkspaceDirectory": "Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role.
", "RestoreWorkspace": "Restores the specified WorkSpace to its last known healthy state.
You cannot restore a WorkSpace unless its state is AVAILABLE
, ERROR
, or UNHEALTHY
.
Restoring a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Restore a WorkSpace.
This operation is asynchronous and returns before the WorkSpace is completely restored.
", "RevokeIpRules": "Removes one or more rules from the specified IP access control group.
", "StartWorkspaces": "Starts the specified WorkSpaces.
You cannot start a WorkSpace unless it has a running mode of AutoStop
and a state of STOPPED
.
Indicates whether users can use Windows clients to access their WorkSpaces. To restrict WorkSpaces access to trusted devices (also known as managed devices) with valid certificates, specify a value of TRUST
. For more information, see Restrict WorkSpaces Access to Trusted Devices.
Indicates whether users can use macOS clients to access their WorkSpaces. To restrict WorkSpaces access to trusted devices (also known as managed devices) with valid certificates, specify a value of TRUST
. For more information, see Restrict WorkSpaces Access to Trusted Devices.
Indicates whether users can access their WorkSpaces through a web browser.
", + "WorkspaceAccessProperties$DeviceTypeIos": "Indicates whether users can use iOS devices to access their WorkSpaces.
", + "WorkspaceAccessProperties$DeviceTypeAndroid": "Indicates whether users can use Android devices to access their WorkSpaces.
", + "WorkspaceAccessProperties$DeviceTypeChromeOs": "Indicates whether users can use Chromebooks to access their WorkSpaces.
", + "WorkspaceAccessProperties$DeviceTypeZeroClient": "Indicates whether users can use zero client devices to access their WorkSpaces.
" + } + }, "AccountModification": { - "base": "Describes a modification to the configuration of bring your own license (BYOL) for the specified account.
", + "base": "Describes a modification to the configuration of Bring Your Own License (BYOL) for the specified account.
", "refs": { "AccountModificationList$member": null } @@ -93,9 +110,15 @@ "refs": { "DefaultWorkspaceCreationProperties$EnableWorkDocs": "Specifies whether the directory is enabled for Amazon WorkDocs.
", "DefaultWorkspaceCreationProperties$EnableInternetAccess": "Specifies whether to automatically assign a public IP address to WorkSpaces in this directory by default. If enabled, the public IP address allows outbound internet access from your WorkSpaces when you’re using an internet gateway in the Amazon VPC in which your WorkSpaces are located. If you're using a Network Address Translation (NAT) gateway for outbound internet access from your VPC, or if your WorkSpaces are in public subnets and you manually assign them Elastic IP addresses, you should disable this setting. This setting applies to new WorkSpaces that you launch or to existing WorkSpaces that you rebuild. For more information, see Configure a VPC for Amazon WorkSpaces.
", - "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "Specifies whether the WorkSpace user is an administrator on the WorkSpace.
", + "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "Specifies whether WorkSpace users are local administrators on their WorkSpaces.
", + "DefaultWorkspaceCreationProperties$EnableMaintenanceMode": "Specifies whether maintenance mode is enabled for WorkSpaces. For more information, see WorkSpace Maintenance.
", + "RegisterWorkspaceDirectoryRequest$EnableWorkDocs": "Indicates whether Amazon WorkDocs is enabled or disabled. If you have enabled this parameter and WorkDocs is not available in the Region, you will receive an OperationNotSupportedException error. Set EnableWorkDocs
to disabled, and try again.
Indicates whether self-service capabilities are enabled or disabled.
", "Workspace$UserVolumeEncryptionEnabled": "Indicates whether the data stored on the user volume is encrypted.
", "Workspace$RootVolumeEncryptionEnabled": "Indicates whether the data stored on the root volume is encrypted.
", + "WorkspaceCreationProperties$EnableInternetAccess": "Indicates whether internet access is enabled for your WorkSpaces.
", + "WorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "Indicates whether users are local administrators of their WorkSpaces.
", + "WorkspaceCreationProperties$EnableMaintenanceMode": "Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see WorkSpace Maintenance.
", "WorkspaceRequest$UserVolumeEncryptionEnabled": "Indicates whether the data stored on the user volume is encrypted.
", "WorkspaceRequest$RootVolumeEncryptionEnabled": "Indicates whether the data stored on the root volume is encrypted.
" } @@ -250,11 +273,12 @@ "DefaultOu": { "base": null, "refs": { - "DefaultWorkspaceCreationProperties$DefaultOu": "The organizational unit (OU) in the directory for the WorkSpace machine accounts.
" + "DefaultWorkspaceCreationProperties$DefaultOu": "The organizational unit (OU) in the directory for the WorkSpace machine accounts.
", + "WorkspaceCreationProperties$DefaultOu": "The default organizational unit (OU) for your WorkSpace directories.
" } }, "DefaultWorkspaceCreationProperties": { - "base": "Describes the default values used to create a WorkSpace.
", + "base": "Describes the default values that are used to create WorkSpaces. For more information, see Update Directory Details for Your WorkSpaces.
", "refs": { "WorkspaceDirectory$WorkspaceCreationProperties": "The default creation properties for all WorkSpaces in the directory.
" } @@ -289,6 +313,16 @@ "refs": { } }, + "DeregisterWorkspaceDirectoryRequest": { + "base": null, + "refs": { + } + }, + "DeregisterWorkspaceDirectoryResult": { + "base": null, + "refs": { + } + }, "DescribeAccountModificationsRequest": { "base": null, "refs": { @@ -414,9 +448,14 @@ "base": null, "refs": { "AssociateIpGroupsRequest$DirectoryId": "The identifier of the directory.
", + "DeregisterWorkspaceDirectoryRequest$DirectoryId": "The identifier of the directory. If any WorkSpaces are registered to this directory, you must remove them before you deregister the directory, or you will receive an OperationNotSupportedException error.
", "DescribeWorkspacesRequest$DirectoryId": "The identifier of the directory. In addition, you can optionally specify a specific directory user (see UserName
). You cannot combine this parameter with any other filter.
The identifier of the directory.
", + "ModifySelfservicePermissionsRequest$ResourceId": "The identifier of the directory.
", + "ModifyWorkspaceAccessPropertiesRequest$ResourceId": "The identifier of the directory.
", + "ModifyWorkspaceCreationPropertiesRequest$ResourceId": "The identifier of the directory.
", + "RegisterWorkspaceDirectoryRequest$DirectoryId": "The identifier of the directory. You cannot register a directory if it does not have a status of Active. If the directory does not have a status of Active, you will receive an InvalidResourceStateException error. If you have already registered the maximum number of directories that you can register with Amazon WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister directories that you are not using for WorkSpaces, and try again.
", "Workspace$DirectoryId": "The identifier of the AWS Directory Service directory for the WorkSpace.
", "WorkspaceDirectory$DirectoryId": "The directory identifier.
", "WorkspaceRequest$DirectoryId": "The identifier of the AWS Directory Service directory for the WorkSpace. You can use DescribeWorkspaceDirectories to list the available directories.
" @@ -483,7 +522,9 @@ "ResourceLimitExceededException$message": "The exception error message.
", "ResourceNotFoundException$message": "The resource could not be found.
", "ResourceUnavailableException$message": "The exception error message.
", - "UnsupportedWorkspaceConfigurationException$message": null + "UnsupportedNetworkConfigurationException$message": null, + "UnsupportedWorkspaceConfigurationException$message": null, + "WorkspacesDefaultRoleNotFoundException$message": null } }, "FailedCreateWorkspaceRequest": { @@ -638,6 +679,7 @@ "base": null, "refs": { "DescribeIpGroupsRequest$MaxResults": "The maximum number of items to return.
", + "DescribeWorkspaceDirectoriesRequest$Limit": "The maximum number of directories to return.
", "DescribeWorkspaceImagesRequest$MaxResults": "The maximum number of items to return.
", "DescribeWorkspacesRequest$Limit": "The maximum number of items to return.
" } @@ -708,6 +750,36 @@ "refs": { } }, + "ModifySelfservicePermissionsRequest": { + "base": null, + "refs": { + } + }, + "ModifySelfservicePermissionsResult": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceAccessPropertiesRequest": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceAccessPropertiesResult": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceCreationPropertiesRequest": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceCreationPropertiesResult": { + "base": null, + "refs": { + } + }, "ModifyWorkspacePropertiesRequest": { "base": null, "refs": { @@ -835,7 +907,12 @@ "ReconnectEnum": { "base": null, "refs": { - "ClientProperties$ReconnectEnabled": "Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.
" + "ClientProperties$ReconnectEnabled": "Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.
", + "SelfservicePermissions$RestartWorkspace": "Specifies whether users can restart their WorkSpace.
", + "SelfservicePermissions$IncreaseVolumeSize": "Specifies whether users can increase the volume size of the drives on their WorkSpace.
", + "SelfservicePermissions$ChangeComputeType": "Specifies whether users can change the compute type (bundle) for their WorkSpace.
", + "SelfservicePermissions$SwitchRunningMode": "Specifies whether users can switch the running mode of their WorkSpace.
", + "SelfservicePermissions$RebuildWorkspace": "Specifies whether users can rebuild the operating system of a WorkSpace to its original state.
" } }, "Region": { @@ -844,6 +921,16 @@ "CopyWorkspaceImageRequest$SourceRegion": "The identifier of the source Region.
" } }, + "RegisterWorkspaceDirectoryRequest": { + "base": null, + "refs": { + } + }, + "RegisterWorkspaceDirectoryResult": { + "base": null, + "refs": { + } + }, "RegistrationCode": { "base": null, "refs": { @@ -934,9 +1021,17 @@ "base": null, "refs": { "DefaultWorkspaceCreationProperties$CustomSecurityGroupId": "The identifier of any security groups to apply to WorkSpaces when they are created.
", + "WorkspaceCreationProperties$CustomSecurityGroupId": "The identifier of your custom security group.
", "WorkspaceDirectory$WorkspaceSecurityGroupId": "The identifier of the security group that is assigned to new WorkSpaces.
" } }, + "SelfservicePermissions": { + "base": "Describes the self-service permissions for a directory. For more information, see Enable Self-Service WorkSpace Management Capabilities for Your Users.
", + "refs": { + "ModifySelfservicePermissionsRequest$SelfservicePermissions": "The permissions to enable or disable self-service capabilities.
", + "WorkspaceDirectory$SelfservicePermissions": "The default self-service permissions for WorkSpaces in the directory.
" + } + }, "Snapshot": { "base": "Describes a snapshot.
", "refs": { @@ -1004,6 +1099,7 @@ "SubnetIds": { "base": null, "refs": { + "RegisterWorkspaceDirectoryRequest$SubnetIds": "The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an OperationNotSupportedException error.
", "WorkspaceDirectory$SubnetIds": "The identifiers of the subnets used with the directory.
" } }, @@ -1033,6 +1129,7 @@ "CreateTagsRequest$Tags": "The tags. Each WorkSpaces resource can have a maximum of 50 tags.
", "DescribeTagsResult$TagList": "The tags.
", "ImportWorkspaceImageRequest$Tags": "The tags. Each WorkSpaces resource can have a maximum of 50 tags.
", + "RegisterWorkspaceDirectoryRequest$Tags": "The tags associated with the directory.
", "WorkspaceRequest$Tags": "The tags for the WorkSpace.
" } }, @@ -1048,6 +1145,13 @@ "ModifyWorkspaceStateRequest$WorkspaceState": "The WorkSpace state.
" } }, + "Tenancy": { + "base": null, + "refs": { + "RegisterWorkspaceDirectoryRequest$Tenancy": "Indicates whether your WorkSpace directory is dedicated or shared. To use Bring Your Own License (BYOL) images, this value must be set to DEDICATED
and your AWS account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images.
Specifies whether the directory is dedicated or shared. To use Bring Your Own License (BYOL), this value must be set to DEDICATED
. For more information, see Bring Your Own Windows Desktop Images.
Describes the information used to terminate a WorkSpace.
", "refs": { @@ -1079,8 +1183,13 @@ "WorkspaceConnectionStatus$LastKnownUserConnectionTimestamp": "The timestamp of the last known user connection.
" } }, + "UnsupportedNetworkConfigurationException": { + "base": "The configuration of this network is not supported for this operation, or your network configuration conflicts with the Amazon WorkSpaces management network IP range. For more information, see Configure a VPC for Amazon WorkSpaces.
", + "refs": { + } + }, "UnsupportedWorkspaceConfigurationException": { - "base": "The configuration of this WorkSpace is not supported for this operation. For more information, see the Amazon WorkSpaces Administration Guide.
", + "base": "The configuration of this WorkSpace is not supported for this operation. For more information, see Required Configuration and Service Components for WorkSpaces .
", "refs": { } }, @@ -1100,7 +1209,7 @@ "DescribeWorkspacesRequest$UserName": "The name of the directory user. You must specify this parameter with DirectoryId
.
The user for the WorkSpace.
", "WorkspaceDirectory$CustomerUserName": "The user name for the service account.
", - "WorkspaceRequest$UserName": "The username of the user for the WorkSpace. This username must exist in the AWS Directory Service directory for the WorkSpace.
" + "WorkspaceRequest$UserName": "The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.
" } }, "UserStorage": { @@ -1128,6 +1237,13 @@ "WorkspaceList$member": null } }, + "WorkspaceAccessProperties": { + "base": "The device types and operating systems that can be used to access a WorkSpace. For more information, see Amazon WorkSpaces Client Network Requirements.
", + "refs": { + "ModifyWorkspaceAccessPropertiesRequest$WorkspaceAccessProperties": "The device types and operating systems to enable or disable for access.
", + "WorkspaceDirectory$WorkspaceAccessProperties": "The devices and operating systems that users can use to access Workspaces.
" + } + }, "WorkspaceBundle": { "base": "Describes a WorkSpace bundle.
", "refs": { @@ -1146,8 +1262,14 @@ "DescribeWorkspacesConnectionStatusResult$WorkspacesConnectionStatus": "Information about the connection status of the WorkSpace.
" } }, + "WorkspaceCreationProperties": { + "base": "Describes the default properties that are used for creating WorkSpaces. For more information, see Update Directory Details for Your WorkSpaces.
", + "refs": { + "ModifyWorkspaceCreationPropertiesRequest$WorkspaceCreationProperties": "The default properties for creating WorkSpaces.
" + } + }, "WorkspaceDirectory": { - "base": "Describes an AWS Directory Service directory that is used with Amazon WorkSpaces.
", + "base": "Describes a directory that is used with Amazon WorkSpaces.
", "refs": { "DirectoryList$member": null } @@ -1155,7 +1277,7 @@ "WorkspaceDirectoryState": { "base": null, "refs": { - "WorkspaceDirectory$State": "The state of the directory's registration with Amazon WorkSpaces
" + "WorkspaceDirectory$State": "The state of the directory's registration with Amazon WorkSpaces.
" } }, "WorkspaceDirectoryType": { @@ -1256,7 +1378,7 @@ "WorkspaceImageRequiredTenancy": { "base": null, "refs": { - "WorkspaceImage$RequiredTenancy": "Specifies whether the image is running on dedicated hardware. When bring your own license (BYOL) is enabled, this value is set to DEDICATED.
" + "WorkspaceImage$RequiredTenancy": "Specifies whether the image is running on dedicated hardware. When Bring Your Own License (BYOL) is enabled, this value is set to DEDICATED
. For more information, see Bring Your Own Windows Desktop Images.
The operational state of the WorkSpace.
" } }, + "WorkspacesDefaultRoleNotFoundException": { + "base": "The workspaces_DefaultRole role could not be found. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role.
", + "refs": { + } + }, "WorkspacesIpGroup": { "base": "Describes an IP access control group.
", "refs": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index e6c4fbe2646..47afed048f2 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1002,6 +1002,21 @@ "us-west-2" : { } } }, + "dataexchange" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "datapipeline" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1563,11 +1578,16 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -2631,6 +2651,10 @@ "qldb" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3306,6 +3330,10 @@ "session.qldb" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3315,25 +3343,10 @@ "shield" : { "defaults" : { "protocols" : [ "https" ], - "sslCommonName" : "shield.ca-central-1.amazonaws.com" + "sslCommonName" : "shield.us-east-1.amazonaws.com" }, "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, 
- "us-west-2" : { } + "us-east-1" : { } }, "isRegionalized" : false }, @@ -4000,6 +4013,11 @@ "cn-northwest-1" : { } } }, + "dax" : { + "endpoints" : { + "cn-northwest-1" : { } + } + }, "directconnect" : { "endpoints" : { "cn-north-1" : { }, @@ -4345,6 +4363,11 @@ "hostname" : "cn.transcribe.cn-northwest-1.amazonaws.com.cn" } } + }, + "workspaces" : { + "endpoints" : { + "cn-northwest-1" : { } + } } } }, { diff --git a/service/applicationdiscoveryservice/api_doc.go b/service/applicationdiscoveryservice/api_doc.go index cb7fd8bb7df..3f575c2673c 100644 --- a/service/applicationdiscoveryservice/api_doc.go +++ b/service/applicationdiscoveryservice/api_doc.go @@ -59,6 +59,10 @@ // This guide is intended for use with the AWS Application Discovery Service // User Guide (http://docs.aws.amazon.com/application-discovery/latest/userguide/). // +// Remember that you must set your AWS Migration Hub home region before you +// call any of these APIs, or a HomeRegionNotSetException error will be returned. +// Also, you must make the API calls while in your home region. +// // See https://docs.aws.amazon.com/goto/WebAPI/discovery-2015-11-01 for more information on this service. // // See applicationdiscoveryservice package documentation for more information. diff --git a/service/applicationdiscoveryservice/api_errors.go b/service/applicationdiscoveryservice/api_errors.go index e1d94d305c7..739f76b81a4 100644 --- a/service/applicationdiscoveryservice/api_errors.go +++ b/service/applicationdiscoveryservice/api_errors.go @@ -15,6 +15,12 @@ const ( // "ConflictErrorException". ErrCodeConflictErrorException = "ConflictErrorException" + // ErrCodeHomeRegionNotSetException for service response error code + // "HomeRegionNotSetException". + // + // The home region is not set. Set the home region to continue. + ErrCodeHomeRegionNotSetException = "HomeRegionNotSetException" + // ErrCodeInvalidParameterException for service response error code // "InvalidParameterException". 
// diff --git a/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go b/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go index ab7be5b71c1..8d7645934f3 100644 --- a/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go +++ b/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go @@ -12,7 +12,7 @@ import ( type DescribeExportConfigurationsInput struct { _ struct{} `type:"structure"` - // A list of continuous export ids to search for. + // A list of continuous export IDs to search for. ExportIds []string `locationName:"exportIds" type:"list"` // A number between 1 and 100 specifying the maximum number of continuous export diff --git a/service/autoscaling/api_op_CreateAutoScalingGroup.go b/service/autoscaling/api_op_CreateAutoScalingGroup.go index e35ddabf8b4..e70b3375162 100644 --- a/service/autoscaling/api_op_CreateAutoScalingGroup.go +++ b/service/autoscaling/api_op_CreateAutoScalingGroup.go @@ -104,6 +104,11 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. LoadBalancerNames []string `type:"list"` + // The maximum amount of time, in seconds, that an instance can be in service. + // + // Valid Range: Minimum value of 604800. + MaxInstanceLifetime *int64 `type:"integer"` + // The maximum size of the group. // // MaxSize is a required field diff --git a/service/autoscaling/api_op_CreateLaunchConfiguration.go b/service/autoscaling/api_op_CreateLaunchConfiguration.go index 20664ae727c..ca07b0b886a 100644 --- a/service/autoscaling/api_op_CreateLaunchConfiguration.go +++ b/service/autoscaling/api_op_CreateLaunchConfiguration.go @@ -146,7 +146,7 @@ type CreateLaunchConfigurationInput struct { // For more information, see Instance Placement Tenancy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-vpc-tenancy) // in the Amazon EC2 Auto Scaling User Guide. 
// - // Valid values: default | dedicated + // Valid Values: default | dedicated PlacementTenancy *string `min:"1" type:"string"` // The ID of the RAM disk to select. @@ -166,17 +166,13 @@ type CreateLaunchConfigurationInput struct { // The maximum hourly price to be paid for any Spot Instance launched to fulfill // the request. Spot Instances are launched when the price you specify exceeds - // the current Spot market price. For more information, see Launching Spot Instances + // the current Spot price. For more information, see Launching Spot Instances // in Your Auto Scaling Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) // in the Amazon EC2 Auto Scaling User Guide. // - // If a Spot price is set, then the Auto Scaling group will only launch instances - // when the Spot price has been met, regardless of the setting in the Auto Scaling - // group's DesiredCapacity. - // - // When you change your Spot price by creating a new launch configuration, running - // instances will continue to run as long as the Spot price for those running - // instances is higher than the current Spot market price. + // When you change your maximum price by creating a new launch configuration, + // running instances will continue to run as long as the maximum price for those + // running instances is higher than the current Spot price. SpotPrice *string `min:"1" type:"string"` // The Base64-encoded user data to make available to the launched EC2 instances. diff --git a/service/autoscaling/api_op_EnterStandby.go b/service/autoscaling/api_op_EnterStandby.go index 26b54b08f82..09a04dd8172 100644 --- a/service/autoscaling/api_op_EnterStandby.go +++ b/service/autoscaling/api_op_EnterStandby.go @@ -72,6 +72,15 @@ const opEnterStandby = "EnterStandby" // // Moves the specified instances into the standby state. 
// +// If you choose to decrement the desired capacity of the Auto Scaling group, +// the instances can enter standby as long as the desired capacity of the Auto +// Scaling group after the instances are placed into standby is equal to or +// greater than the minimum capacity of the group. +// +// If you choose not to decrement the desired capacity of the Auto Scaling group, +// the Auto Scaling group launches new instances to replace the instances on +// standby. +// // For more information, see Temporarily Removing Instances from Your Auto Scaling // Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html) // in the Amazon EC2 Auto Scaling User Guide. diff --git a/service/autoscaling/api_op_ExitStandby.go b/service/autoscaling/api_op_ExitStandby.go index a13b9a1e578..90d96be1344 100644 --- a/service/autoscaling/api_op_ExitStandby.go +++ b/service/autoscaling/api_op_ExitStandby.go @@ -62,6 +62,8 @@ const opExitStandby = "ExitStandby" // // Moves the specified instances out of the standby state. // +// After you put the instances back in service, the desired capacity is incremented. +// // For more information, see Temporarily Removing Instances from Your Auto Scaling // Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html) // in the Amazon EC2 Auto Scaling User Guide. diff --git a/service/autoscaling/api_op_UpdateAutoScalingGroup.go b/service/autoscaling/api_op_UpdateAutoScalingGroup.go index 56c255e6a67..8e285dbd12e 100644 --- a/service/autoscaling/api_op_UpdateAutoScalingGroup.go +++ b/service/autoscaling/api_op_UpdateAutoScalingGroup.go @@ -55,11 +55,6 @@ type UpdateAutoScalingGroupInput struct { // The name of the launch configuration. If you specify LaunchConfigurationName // in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy. 
- // - // To update an Auto Scaling group with a launch configuration with InstanceMonitoring - // set to false, you must first disable the collection of group metrics. Otherwise, - // you get an error. If you have previously enabled the collection of group - // metrics, you can disable it using DisableMetricsCollection. LaunchConfigurationName *string `min:"1" type:"string"` // The launch template and version to use to specify the updates. If you specify @@ -70,6 +65,11 @@ type UpdateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling API Reference. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` + // The maximum amount of time, in seconds, that an instance can be in service. + // + // Valid Range: Minimum value of 604800. + MaxInstanceLifetime *int64 `type:"integer"` + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` @@ -193,8 +193,7 @@ const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup" // To update an Auto Scaling group, specify the name of the group and the parameter // that you want to change. Any parameters that you don't specify are not changed // by this update request. The new settings take effect on any scaling activities -// after this call returns. Scaling activities that are currently in progress -// aren't affected. +// after this call returns. // // If you associate a new launch configuration or template with an Auto Scaling // group, all new instances will get the updated configuration. Existing instances diff --git a/service/autoscaling/api_types.go b/service/autoscaling/api_types.go index b18f4126f67..a4c029d391b 100644 --- a/service/autoscaling/api_types.go +++ b/service/autoscaling/api_types.go @@ -155,6 +155,11 @@ type AutoScalingGroup struct { // One or more load balancers associated with the group. LoadBalancerNames []string `type:"list"` + // The maximum amount of time, in seconds, that an instance can be in service. + // + // Valid Range: Minimum value of 604800. 
+ MaxInstanceLifetime *int64 `type:"integer"` + // The maximum size of the group. // // MaxSize is a required field @@ -230,6 +235,9 @@ type AutoScalingInstanceDetails struct { // InstanceId is a required field InstanceId *string `min:"1" type:"string" required:"true"` + // The instance type of the EC2 instance. + InstanceType *string `min:"1" type:"string"` + // The launch configuration used to launch the instance. This value is not available // if you attached the instance to the Auto Scaling group. LaunchConfigurationName *string `min:"1" type:"string"` @@ -247,6 +255,12 @@ type AutoScalingInstanceDetails struct { // // ProtectedFromScaleIn is a required field ProtectedFromScaleIn *bool `type:"boolean" required:"true"` + + // The number of capacity units contributed by the instance based on its instance + // type. + // + // Valid Range: Minimum value of 1. Maximum value of 999. + WeightedCapacity *string `min:"1" type:"string"` } // String returns the string representation @@ -453,7 +467,7 @@ type Ebs struct { // or sc1 for Cold HDD. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon EC2 User Guide for Linux Instances. // - // Valid values: standard | io1 | gp2 | st1 | sc1 + // Valid Values: standard | io1 | gp2 | st1 | sc1 VolumeType *string `min:"1" type:"string"` } @@ -576,6 +590,9 @@ type Instance struct { // InstanceId is a required field InstanceId *string `min:"1" type:"string" required:"true"` + // The instance type of the EC2 instance. + InstanceType *string `min:"1" type:"string"` + // The launch configuration associated with the instance. LaunchConfigurationName *string `min:"1" type:"string"` @@ -593,6 +610,12 @@ type Instance struct { // // ProtectedFromScaleIn is a required field ProtectedFromScaleIn *bool `type:"boolean" required:"true"` + + // The number of capacity units contributed by the instance based on its instance + // type. 
+ // + // Valid Range: Minimum value of 1. Maximum value of 999. + WeightedCapacity *string `min:"1" type:"string"` } // String returns the string representation @@ -619,6 +642,14 @@ func (s InstanceMonitoring) String() string { // and Spot Instances, the maximum price to pay for Spot Instances, and how // the Auto Scaling group allocates instance types to fulfill On-Demand and // Spot capacity. +// +// When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, +// this update action does not deploy any changes across the running Amazon +// EC2 instances in the group. Your existing Spot Instances continue to run +// as long as the maximum price for those instances is higher than the current +// Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances +// based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling +// terminates instances according to the group's termination policies. type InstancesDistribution struct { _ struct{} `type:"structure"` @@ -637,16 +668,28 @@ type InstancesDistribution struct { // by On-Demand Instances. This base portion is provisioned first as your group // scales. // - // The default value is 0. If you leave this parameter set to 0, On-Demand Instances - // are launched as a percentage of the Auto Scaling group's desired capacity, - // per the OnDemandPercentageAboveBaseCapacity setting. + // Default if not set is 0. If you leave it set to 0, On-Demand Instances are + // launched as a percentage of the Auto Scaling group's desired capacity, per + // the OnDemandPercentageAboveBaseCapacity setting. + // + // An update to this setting means a gradual replacement of instances to maintain + // the specified number of On-Demand Instances for your base capacity. When + // replacing instances, Amazon EC2 Auto Scaling launches new instances before + // terminating the old ones. 
OnDemandBaseCapacity *int64 `type:"integer"` // Controls the percentages of On-Demand Instances and Spot Instances for your - // additional capacity beyond OnDemandBaseCapacity. The range is 0–100. + // additional capacity beyond OnDemandBaseCapacity. // - // The default value is 100. If you leave this parameter set to 100, the percentages - // are 100% for On-Demand Instances and 0% for Spot Instances. + // Default if not set is 100. If you leave it set to 100, the percentages are + // 100% for On-Demand Instances and 0% for Spot Instances. + // + // An update to this setting means a gradual replacement of instances to maintain + // the percentage of On-Demand Instances for your additional capacity above + // the base capacity. When replacing instances, Amazon EC2 Auto Scaling launches + // new instances before terminating the old ones. + // + // Valid Range: Minimum value of 0. Maximum value of 100. OnDemandPercentageAboveBaseCapacity *int64 `type:"integer"` // Indicates how to allocate instances across Spot Instance pools. @@ -666,9 +709,11 @@ type InstancesDistribution struct { // The number of Spot Instance pools across which to allocate your Spot Instances. // The Spot pools are determined from the different instance types in the Overrides - // array of LaunchTemplate. The range is 1–20. The default value is 2. + // array of LaunchTemplate. Default if not set is 2. + // + // Used only when the Spot allocation strategy is lowest-price. // - // Valid only when the Spot allocation strategy is lowest-price. + // Valid Range: Minimum value of 1. Maximum value of 20. SpotInstancePools *int64 `type:"integer"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -801,7 +846,7 @@ type LaunchConfiguration struct { // The maximum hourly price to be paid for any Spot Instance launched to fulfill // the request. Spot Instances are launched when the price you specify exceeds - // the current Spot market price. + // the current Spot price. 
// // For more information, see Launching Spot Instances in Your Auto Scaling Group // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) @@ -825,6 +870,11 @@ func (s LaunchConfiguration) String() string { // The overrides are used to override the instance type specified by the launch // template with multiple instance types that can be used to launch On-Demand // Instances and Spot Instances. +// +// When you update the launch template or overrides, existing Amazon EC2 instances +// continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches +// instances to match the new settings. When scale in occurs, Amazon EC2 Auto +// Scaling terminates instances according to the group's termination policies. type LaunchTemplate struct { _ struct{} `type:"structure"` @@ -832,9 +882,9 @@ type LaunchTemplate struct { // or launch template name in the request. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` - // Any parameters that you specify override the same parameters in the launch - // template. Currently, the only supported override is instance type. You must - // specify between 2 and 20 overrides. + // An optional setting. Any parameters that you specify override the same parameters + // in the launch template. Currently, the only supported override is instance + // type. You can specify between 1 and 20 instance types. Overrides []LaunchTemplateOverrides `type:"list"` } @@ -875,6 +925,15 @@ type LaunchTemplateOverrides struct { // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) // in the Amazon Elastic Compute Cloud User Guide. InstanceType *string `min:"1" type:"string"` + + // The number of capacity units, which gives the instance type a proportional + // weight to other instance types. For example, larger instance types are generally + // weighted more than smaller instance types. 
These are the same units that + // you chose to set the desired capacity in terms of instances, or a performance + // attribute such as vCPUs, memory, or I/O. + // + // Valid Range: Minimum value of 1. Maximum value of 999. + WeightedCapacity *string `min:"1" type:"string"` } // String returns the string representation @@ -888,6 +947,9 @@ func (s *LaunchTemplateOverrides) Validate() error { if s.InstanceType != nil && len(*s.InstanceType) < 1 { invalidParams.Add(aws.NewErrParamMinLen("InstanceType", 1)) } + if s.WeightedCapacity != nil && len(*s.WeightedCapacity) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("WeightedCapacity", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -1285,8 +1347,8 @@ type MixedInstancesPolicy struct { // The instances distribution to use. // - // If you leave this parameter unspecified when creating a mixed instances policy, - // the default values are used. + // If you leave this parameter unspecified, the value for each parameter in + // InstancesDistribution uses a default value. InstancesDistribution *InstancesDistribution `type:"structure"` // The launch template and instance types (overrides). @@ -1350,13 +1412,7 @@ func (s NotificationConfiguration) String() string { type PredefinedMetricSpecification struct { _ struct{} `type:"structure"` - // The metric type. - // - // PredefinedMetricType is a required field - PredefinedMetricType MetricType `type:"string" required:"true" enum:"true"` - - // Identifies the resource associated with the metric type. The following predefined - // metrics are available: + // The metric type. The following predefined metrics are available: // // * ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling // group. @@ -1370,15 +1426,21 @@ type PredefinedMetricSpecification struct { // * ALBRequestCountPerTarget - Number of requests completed per target in // an Application Load Balancer target group. 
// - // For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, - // and ASGAverageNetworkOut, the parameter must not be specified as the resource - // associated with the metric type is the Auto Scaling group. For predefined - // metric type ALBRequestCountPerTarget, the parameter must be specified in - // the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id - // , where app/load-balancer-name/load-balancer-id is the final portion of the - // load balancer ARN, and targetgroup/target-group-name/target-group-id is the - // final portion of the target group ARN. The target group must be attached - // to the Auto Scaling group. + // PredefinedMetricType is a required field + PredefinedMetricType MetricType `type:"string" required:"true" enum:"true"` + + // Identifies the resource associated with the metric type. You can't specify + // a resource label unless the metric type is ALBRequestCountPerTarget and there + // is a target group attached to the Auto Scaling group. + // + // The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id + // , where + // + // * app/load-balancer-name/load-balancer-id is the final portion of the + // load balancer ARN, and + // + // * targetgroup/target-group-name/target-group-id is the final portion of + // the target group ARN. ResourceLabel *string `min:"1" type:"string"` } diff --git a/service/chime/api_doc.go b/service/chime/api_doc.go index 104da361c0a..b41b2ff879d 100644 --- a/service/chime/api_doc.go +++ b/service/chime/api_doc.go @@ -4,10 +4,13 @@ // requests to Amazon Chime. // // The Amazon Chime API (application programming interface) is designed for -// administrators to use to perform key tasks, such as creating and managing -// Amazon Chime accounts and users. 
This guide provides detailed information +// developers to perform key tasks, such as creating and managing Amazon Chime +// accounts, users, and Voice Connectors. This guide provides detailed information // about the Amazon Chime API, including operations, types, inputs and outputs, -// and error codes. +// and error codes. It also includes some server-side API actions to use with +// the Amazon Chime SDK. For more information about the Amazon Chime SDK, see +// Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. // // You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the // REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. @@ -41,9 +44,9 @@ // https://service.chime.aws.amazon.com. // // Administrative permissions are controlled using AWS Identity and Access Management -// (IAM). For more information, see Control Access to the Amazon Chime Console -// (https://docs.aws.amazon.com/chime/latest/ag/control-access.html) in the -// Amazon Chime Administration Guide. +// (IAM). For more information, see Identity and Access Management for Amazon +// Chime (https://docs.aws.amazon.com/chime/latest/ag/security-iam.html) in +// the Amazon Chime Administration Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01 for more information on this service. 
// diff --git a/service/chime/api_enums.go b/service/chime/api_enums.go index a144226bec5..256a2315fe2 100644 --- a/service/chime/api_enums.go +++ b/service/chime/api_enums.go @@ -140,6 +140,24 @@ func (enum License) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type MemberType string + +// Enum values for MemberType +const ( + MemberTypeUser MemberType = "User" + MemberTypeBot MemberType = "Bot" + MemberTypeWebhook MemberType = "Webhook" +) + +func (enum MemberType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MemberType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type OrderedPhoneNumberStatus string // Enum values for OrderedPhoneNumberStatus @@ -288,6 +306,23 @@ func (enum RegistrationStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type RoomMembershipRole string + +// Enum values for RoomMembershipRole +const ( + RoomMembershipRoleAdministrator RoomMembershipRole = "Administrator" + RoomMembershipRoleMember RoomMembershipRole = "Member" +) + +func (enum RoomMembershipRole) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RoomMembershipRole) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type VoiceConnectorAwsRegion string // Enum values for VoiceConnectorAwsRegion diff --git a/service/chime/api_op_BatchCreateAttendee.go b/service/chime/api_op_BatchCreateAttendee.go new file mode 100644 index 00000000000..5342cb19ddc --- /dev/null +++ b/service/chime/api_op_BatchCreateAttendee.go @@ -0,0 +1,197 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package chime + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type BatchCreateAttendeeInput struct { + _ struct{} `type:"structure"` + + // The request containing the attendees to create. + // + // Attendees is a required field + Attendees []CreateAttendeeRequestItem `type:"list" required:"true"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCreateAttendeeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchCreateAttendeeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchCreateAttendeeInput"} + + if s.Attendees == nil { + invalidParams.Add(aws.NewErrParamRequired("Attendees")) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + if s.Attendees != nil { + for i, v := range s.Attendees { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attendees", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s BatchCreateAttendeeInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Attendees != nil { + v := s.Attendees + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Attendees", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type BatchCreateAttendeeOutput struct { + _ struct{} `type:"structure"` + + // The attendee information, including attendees IDs and join tokens. + Attendees []Attendee `type:"list"` + + // If the action fails for one or more of the attendees in the request, a list + // of the attendees is returned, along with error codes and error messages. + Errors []CreateAttendeeError `type:"list"` +} + +// String returns the string representation +func (s BatchCreateAttendeeOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BatchCreateAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendees != nil { + v := s.Attendees + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Attendees", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opBatchCreateAttendee = "BatchCreateAttendee" + +// BatchCreateAttendeeRequest returns a request value for making API operation for +// Amazon Chime. 
+// +// Creates up to 100 new attendees for an active Amazon Chime SDK meeting. For +// more information about the Amazon Chime SDK, see Using the Amazon Chime SDK +// (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) in the Amazon +// Chime Developer Guide. +// +// // Example sending a request using BatchCreateAttendeeRequest. +// req := client.BatchCreateAttendeeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/BatchCreateAttendee +func (c *Client) BatchCreateAttendeeRequest(input *BatchCreateAttendeeInput) BatchCreateAttendeeRequest { + op := &aws.Operation{ + Name: opBatchCreateAttendee, + HTTPMethod: "POST", + HTTPPath: "/meetings/{meetingId}/attendees?operation=batch-create", + } + + if input == nil { + input = &BatchCreateAttendeeInput{} + } + + req := c.newRequest(op, input, &BatchCreateAttendeeOutput{}) + return BatchCreateAttendeeRequest{Request: req, Input: input, Copy: c.BatchCreateAttendeeRequest} +} + +// BatchCreateAttendeeRequest is the request type for the +// BatchCreateAttendee API operation. +type BatchCreateAttendeeRequest struct { + *aws.Request + Input *BatchCreateAttendeeInput + Copy func(*BatchCreateAttendeeInput) BatchCreateAttendeeRequest +} + +// Send marshals and sends the BatchCreateAttendee API request. +func (r BatchCreateAttendeeRequest) Send(ctx context.Context) (*BatchCreateAttendeeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchCreateAttendeeResponse{ + BatchCreateAttendeeOutput: r.Request.Data.(*BatchCreateAttendeeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchCreateAttendeeResponse is the response type for the +// BatchCreateAttendee API operation. 
+type BatchCreateAttendeeResponse struct { + *BatchCreateAttendeeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchCreateAttendee request. +func (r *BatchCreateAttendeeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_BatchCreateRoomMembership.go b/service/chime/api_op_BatchCreateRoomMembership.go new file mode 100644 index 00000000000..895dfcf8d03 --- /dev/null +++ b/service/chime/api_op_BatchCreateRoomMembership.go @@ -0,0 +1,188 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type BatchCreateRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The list of membership items. + // + // MembershipItemList is a required field + MembershipItemList []MembershipItem `type:"list" required:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCreateRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchCreateRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchCreateRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MembershipItemList == nil { + invalidParams.Add(aws.NewErrParamRequired("MembershipItemList")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BatchCreateRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MembershipItemList != nil { + v := s.MembershipItemList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "MembershipItemList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type BatchCreateRoomMembershipOutput struct { + _ struct{} `type:"structure"` + + // If the action fails for one or more of the member IDs in the request, a list + // of the member IDs is returned, along with error codes and error messages. + Errors []MemberError `type:"list"` +} + +// String returns the string representation +func (s BatchCreateRoomMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s BatchCreateRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opBatchCreateRoomMembership = "BatchCreateRoomMembership" + +// BatchCreateRoomMembershipRequest returns a request value for making API operation for +// Amazon Chime. +// +// Adds up to 50 members to a chat room. Members can be either users or bots. +// The member role designates whether the member is a chat room administrator +// or a general chat room member. +// +// // Example sending a request using BatchCreateRoomMembershipRequest. +// req := client.BatchCreateRoomMembershipRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/BatchCreateRoomMembership +func (c *Client) BatchCreateRoomMembershipRequest(input *BatchCreateRoomMembershipInput) BatchCreateRoomMembershipRequest { + op := &aws.Operation{ + Name: opBatchCreateRoomMembership, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships?operation=batch-create", + } + + if input == nil { + input = &BatchCreateRoomMembershipInput{} + } + + req := c.newRequest(op, input, &BatchCreateRoomMembershipOutput{}) + return BatchCreateRoomMembershipRequest{Request: req, Input: input, Copy: c.BatchCreateRoomMembershipRequest} +} + +// BatchCreateRoomMembershipRequest is the request type for the +// BatchCreateRoomMembership API operation. +type BatchCreateRoomMembershipRequest struct { + *aws.Request + Input *BatchCreateRoomMembershipInput + Copy func(*BatchCreateRoomMembershipInput) BatchCreateRoomMembershipRequest +} + +// Send marshals and sends the BatchCreateRoomMembership API request. 
+func (r BatchCreateRoomMembershipRequest) Send(ctx context.Context) (*BatchCreateRoomMembershipResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchCreateRoomMembershipResponse{ + BatchCreateRoomMembershipOutput: r.Request.Data.(*BatchCreateRoomMembershipOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchCreateRoomMembershipResponse is the response type for the +// BatchCreateRoomMembership API operation. +type BatchCreateRoomMembershipResponse struct { + *BatchCreateRoomMembershipOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchCreateRoomMembership request. +func (r *BatchCreateRoomMembershipResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateAttendee.go b/service/chime/api_op_CreateAttendee.go new file mode 100644 index 00000000000..5e1ce83a469 --- /dev/null +++ b/service/chime/api_op_CreateAttendee.go @@ -0,0 +1,164 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateAttendeeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + // + // ExternalUserId is a required field + ExternalUserId *string `min:"2" type:"string" required:"true" sensitive:"true"` + + // The Amazon Chime SDK meeting ID. 
+ // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAttendeeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAttendeeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateAttendeeInput"} + + if s.ExternalUserId == nil { + invalidParams.Add(aws.NewErrParamRequired("ExternalUserId")) + } + if s.ExternalUserId != nil && len(*s.ExternalUserId) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ExternalUserId", 2)) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateAttendeeInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateAttendeeOutput struct { + _ struct{} `type:"structure"` + + // The attendee information, including attendee ID and join token. + Attendee *Attendee `type:"structure"` +} + +// String returns the string representation +func (s CreateAttendeeOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendee != nil { + v := s.Attendee + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Attendee", v, metadata) + } + return nil +} + +const opCreateAttendee = "CreateAttendee" + +// CreateAttendeeRequest returns a request value for making API operation for +// Amazon Chime. +// +// Creates a new attendee for an active Amazon Chime SDK meeting. For more information +// about the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using CreateAttendeeRequest. +// req := client.CreateAttendeeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateAttendee +func (c *Client) CreateAttendeeRequest(input *CreateAttendeeInput) CreateAttendeeRequest { + op := &aws.Operation{ + Name: opCreateAttendee, + HTTPMethod: "POST", + HTTPPath: "/meetings/{meetingId}/attendees", + } + + if input == nil { + input = &CreateAttendeeInput{} + } + + req := c.newRequest(op, input, &CreateAttendeeOutput{}) + return CreateAttendeeRequest{Request: req, Input: input, Copy: c.CreateAttendeeRequest} +} + +// CreateAttendeeRequest is the request type for the +// CreateAttendee API operation. +type CreateAttendeeRequest struct { + *aws.Request + Input *CreateAttendeeInput + Copy func(*CreateAttendeeInput) CreateAttendeeRequest +} + +// Send marshals and sends the CreateAttendee API request. 
+func (r CreateAttendeeRequest) Send(ctx context.Context) (*CreateAttendeeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateAttendeeResponse{ + CreateAttendeeOutput: r.Request.Data.(*CreateAttendeeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateAttendeeResponse is the response type for the +// CreateAttendee API operation. +type CreateAttendeeResponse struct { + *CreateAttendeeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateAttendee request. +func (r *CreateAttendeeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateMeeting.go b/service/chime/api_op_CreateMeeting.go new file mode 100644 index 00000000000..4bc6344e70d --- /dev/null +++ b/service/chime/api_op_CreateMeeting.go @@ -0,0 +1,192 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateMeetingInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the client request. Use a different token for different + // meetings. + // + // ClientRequestToken is a required field + ClientRequestToken *string `min:"2" type:"string" required:"true" idempotencyToken:"true" sensitive:"true"` + + // The Region in which to create the meeting. Available values: us-east-1, us-west-2. + MediaRegion *string `type:"string"` + + // Reserved. + MeetingHostId *string `min:"2" type:"string" sensitive:"true"` + + // The configuration for resource targets to receive notifications when meeting + // and attendee events occur. 
+ NotificationsConfiguration *MeetingNotificationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateMeetingInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMeetingInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateMeetingInput"} + + if s.ClientRequestToken == nil { + invalidParams.Add(aws.NewErrParamRequired("ClientRequestToken")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 2)) + } + if s.MeetingHostId != nil && len(*s.MeetingHostId) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("MeetingHostId", 2)) + } + if s.NotificationsConfiguration != nil { + if err := s.NotificationsConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationsConfiguration", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateMeetingInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ClientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MediaRegion != nil { + v := *s.MediaRegion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MediaRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingHostId != nil { + v := *s.MeetingHostId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MeetingHostId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NotificationsConfiguration != nil { + v := s.NotificationsConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "NotificationsConfiguration", v, metadata) + } + return nil +} + +type CreateMeetingOutput struct { + _ struct{} `type:"structure"` + + // The meeting information, including the meeting ID and MediaPlacement. + Meeting *Meeting `type:"structure"` +} + +// String returns the string representation +func (s CreateMeetingOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateMeetingOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Meeting != nil { + v := s.Meeting + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Meeting", v, metadata) + } + return nil +} + +const opCreateMeeting = "CreateMeeting" + +// CreateMeetingRequest returns a request value for making API operation for +// Amazon Chime. 
+// +// Creates a new Amazon Chime SDK meeting in the specified media Region with +// no initial attendees. For more information about the Amazon Chime SDK, see +// Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using CreateMeetingRequest. +// req := client.CreateMeetingRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateMeeting +func (c *Client) CreateMeetingRequest(input *CreateMeetingInput) CreateMeetingRequest { + op := &aws.Operation{ + Name: opCreateMeeting, + HTTPMethod: "POST", + HTTPPath: "/meetings", + } + + if input == nil { + input = &CreateMeetingInput{} + } + + req := c.newRequest(op, input, &CreateMeetingOutput{}) + return CreateMeetingRequest{Request: req, Input: input, Copy: c.CreateMeetingRequest} +} + +// CreateMeetingRequest is the request type for the +// CreateMeeting API operation. +type CreateMeetingRequest struct { + *aws.Request + Input *CreateMeetingInput + Copy func(*CreateMeetingInput) CreateMeetingRequest +} + +// Send marshals and sends the CreateMeeting API request. +func (r CreateMeetingRequest) Send(ctx context.Context) (*CreateMeetingResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateMeetingResponse{ + CreateMeetingOutput: r.Request.Data.(*CreateMeetingOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateMeetingResponse is the response type for the +// CreateMeeting API operation. +type CreateMeetingResponse struct { + *CreateMeetingOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateMeeting request. 
+func (r *CreateMeetingResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateRoom.go b/service/chime/api_op_CreateRoom.go new file mode 100644 index 00000000000..98af65aea73 --- /dev/null +++ b/service/chime/api_op_CreateRoom.go @@ -0,0 +1,176 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The idempotency token for the request. + ClientRequestToken *string `min:"2" type:"string" idempotencyToken:"true" sensitive:"true"` + + // The room name. + // + // Name is a required field + Name *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s CreateRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 2)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ClientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateRoomOutput struct { + _ struct{} `type:"structure"` + + // The room details. + Room *Room `type:"structure"` +} + +// String returns the string representation +func (s CreateRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Room != nil { + v := s.Room + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Room", v, metadata) + } + return nil +} + +const opCreateRoom = "CreateRoom" + +// CreateRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Creates a chat room for the specified Amazon Chime account. +// +// // Example sending a request using CreateRoomRequest. 
+// req := client.CreateRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateRoom +func (c *Client) CreateRoomRequest(input *CreateRoomInput) CreateRoomRequest { + op := &aws.Operation{ + Name: opCreateRoom, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms", + } + + if input == nil { + input = &CreateRoomInput{} + } + + req := c.newRequest(op, input, &CreateRoomOutput{}) + return CreateRoomRequest{Request: req, Input: input, Copy: c.CreateRoomRequest} +} + +// CreateRoomRequest is the request type for the +// CreateRoom API operation. +type CreateRoomRequest struct { + *aws.Request + Input *CreateRoomInput + Copy func(*CreateRoomInput) CreateRoomRequest +} + +// Send marshals and sends the CreateRoom API request. +func (r CreateRoomRequest) Send(ctx context.Context) (*CreateRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateRoomResponse{ + CreateRoomOutput: r.Request.Data.(*CreateRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateRoomResponse is the response type for the +// CreateRoom API operation. +type CreateRoomResponse struct { + *CreateRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateRoom request. +func (r *CreateRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateRoomMembership.go b/service/chime/api_op_CreateRoomMembership.go new file mode 100644 index 00000000000..b6f7d725b99 --- /dev/null +++ b/service/chime/api_op_CreateRoomMembership.go @@ -0,0 +1,184 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The Amazon Chime member ID (user ID or bot ID). + // + // MemberId is a required field + MemberId *string `type:"string" required:"true"` + + // The role of the member. + Role RoomMembershipRole `type:"string" enum:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MemberId == nil { + invalidParams.Add(aws.NewErrParamRequired("MemberId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s CreateRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// MemberId and Role travel in the JSON body; AccountId and RoomId are
	// bound into the URI path (see the struct's `location:"uri"` tags).
	if s.MemberId != nil {
		v := *s.MemberId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.Role) > 0 {
		v := s.Role

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.AccountId != nil {
		v := *s.AccountId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.RoomId != nil {
		v := *s.RoomId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type CreateRoomMembershipOutput struct {
	_ struct{} `type:"structure"`

	// The room membership details.
	RoomMembership *RoomMembership `type:"structure"`
}

// String returns the string representation
func (s CreateRoomMembershipOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s CreateRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error {
	if s.RoomMembership != nil {
		v := s.RoomMembership

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "RoomMembership", v, metadata)
	}
	return nil
}

const opCreateRoomMembership = "CreateRoomMembership"

// CreateRoomMembershipRequest returns a request value for making API operation for
// Amazon Chime.
//
// Adds a member to a chat room. A member can be either a user or a bot. The
// member role designates whether the member is a chat room administrator or
// a general chat room member.
//
// // Example sending a request using CreateRoomMembershipRequest.
// req := client.CreateRoomMembershipRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateRoomMembership
func (c *Client) CreateRoomMembershipRequest(input *CreateRoomMembershipInput) CreateRoomMembershipRequest {
	op := &aws.Operation{
		Name:       opCreateRoomMembership,
		HTTPMethod: "POST",
		HTTPPath:   "/accounts/{accountId}/rooms/{roomId}/memberships",
	}

	// A nil input is allowed; substitute an empty value so marshaling
	// always has a concrete shape to work with.
	if input == nil {
		input = &CreateRoomMembershipInput{}
	}

	req := c.newRequest(op, input, &CreateRoomMembershipOutput{})
	return CreateRoomMembershipRequest{Request: req, Input: input, Copy: c.CreateRoomMembershipRequest}
}

// CreateRoomMembershipRequest is the request type for the
// CreateRoomMembership API operation.
type CreateRoomMembershipRequest struct {
	*aws.Request
	Input *CreateRoomMembershipInput
	Copy  func(*CreateRoomMembershipInput) CreateRoomMembershipRequest
}

// Send marshals and sends the CreateRoomMembership API request.
func (r CreateRoomMembershipRequest) Send(ctx context.Context) (*CreateRoomMembershipResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &CreateRoomMembershipResponse{
		CreateRoomMembershipOutput: r.Request.Data.(*CreateRoomMembershipOutput),
		response:                   &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// CreateRoomMembershipResponse is the response type for the
// CreateRoomMembership API operation.
type CreateRoomMembershipResponse struct {
	*CreateRoomMembershipOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// CreateRoomMembership request.
// NOTE(review): "Metdata" (sic) is the generated, shipped public method name;
// do not rename here — fix belongs in the generator.
func (r *CreateRoomMembershipResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}

// ---- new file in this patch: service/chime/api_op_DeleteAttendee.go ----

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package chime

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
	"github.com/aws/aws-sdk-go-v2/private/protocol/restjson"
)

type DeleteAttendeeInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime SDK attendee ID.
	//
	// AttendeeId is a required field
	AttendeeId *string `location:"uri" locationName:"attendeeId" type:"string" required:"true"`

	// The Amazon Chime SDK meeting ID.
	//
	// MeetingId is a required field
	MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteAttendeeInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteAttendeeInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "DeleteAttendeeInput"}

	if s.AttendeeId == nil {
		invalidParams.Add(aws.NewErrParamRequired("AttendeeId"))
	}

	if s.MeetingId == nil {
		invalidParams.Add(aws.NewErrParamRequired("MeetingId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteAttendeeInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// Both identifiers are URI path parameters; the request has no body fields.
	if s.AttendeeId != nil {
		v := *s.AttendeeId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "attendeeId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.MeetingId != nil {
		v := *s.MeetingId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type DeleteAttendeeOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteAttendeeOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error {
	return nil
}

const opDeleteAttendee = "DeleteAttendee"

// DeleteAttendeeRequest returns a request value for making API operation for
// Amazon Chime.
//
// Deletes an attendee from the specified Amazon Chime SDK meeting and deletes
// their JoinToken. Attendees are automatically deleted when an Amazon Chime
// SDK meeting is deleted. For more information about the Amazon Chime SDK,
// see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html)
// in the Amazon Chime Developer Guide.
//
// // Example sending a request using DeleteAttendeeRequest.
// req := client.DeleteAttendeeRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteAttendee
func (c *Client) DeleteAttendeeRequest(input *DeleteAttendeeInput) DeleteAttendeeRequest {
	op := &aws.Operation{
		Name:       opDeleteAttendee,
		HTTPMethod: "DELETE",
		HTTPPath:   "/meetings/{meetingId}/attendees/{attendeeId}",
	}

	if input == nil {
		input = &DeleteAttendeeInput{}
	}

	req := c.newRequest(op, input, &DeleteAttendeeOutput{})
	// The operation returns an empty body, so replace the REST-JSON
	// unmarshaler with one that discards the payload.
	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return DeleteAttendeeRequest{Request: req, Input: input, Copy: c.DeleteAttendeeRequest}
}

// DeleteAttendeeRequest is the request type for the
// DeleteAttendee API operation.
type DeleteAttendeeRequest struct {
	*aws.Request
	Input *DeleteAttendeeInput
	Copy  func(*DeleteAttendeeInput) DeleteAttendeeRequest
}

// Send marshals and sends the DeleteAttendee API request.
func (r DeleteAttendeeRequest) Send(ctx context.Context) (*DeleteAttendeeResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &DeleteAttendeeResponse{
		DeleteAttendeeOutput: r.Request.Data.(*DeleteAttendeeOutput),
		response:             &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// DeleteAttendeeResponse is the response type for the
// DeleteAttendee API operation.
type DeleteAttendeeResponse struct {
	*DeleteAttendeeOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// DeleteAttendee request.
// NOTE(review): "Metdata" (sic) is the generated, shipped public method name;
// do not rename here — fix belongs in the generator.
func (r *DeleteAttendeeResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}

// ---- new file in this patch: service/chime/api_op_DeleteMeeting.go ----

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package chime

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
	"github.com/aws/aws-sdk-go-v2/private/protocol/restjson"
)

type DeleteMeetingInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime SDK meeting ID.
	//
	// MeetingId is a required field
	MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteMeetingInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteMeetingInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "DeleteMeetingInput"}

	if s.MeetingId == nil {
		invalidParams.Add(aws.NewErrParamRequired("MeetingId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteMeetingInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// MeetingId is a URI path parameter; the request has no body fields.
	if s.MeetingId != nil {
		v := *s.MeetingId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type DeleteMeetingOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteMeetingOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteMeetingOutput) MarshalFields(e protocol.FieldEncoder) error {
	return nil
}

const opDeleteMeeting = "DeleteMeeting"

// DeleteMeetingRequest returns a request value for making API operation for
// Amazon Chime.
//
// Deletes the specified Amazon Chime SDK meeting. When a meeting is deleted,
// its attendees are also deleted and clients can no longer join it. For more
// information about the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html)
// in the Amazon Chime Developer Guide.
//
// // Example sending a request using DeleteMeetingRequest.
// req := client.DeleteMeetingRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteMeeting
func (c *Client) DeleteMeetingRequest(input *DeleteMeetingInput) DeleteMeetingRequest {
	op := &aws.Operation{
		Name:       opDeleteMeeting,
		HTTPMethod: "DELETE",
		HTTPPath:   "/meetings/{meetingId}",
	}

	if input == nil {
		input = &DeleteMeetingInput{}
	}

	req := c.newRequest(op, input, &DeleteMeetingOutput{})
	// The operation returns an empty body, so replace the REST-JSON
	// unmarshaler with one that discards the payload.
	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return DeleteMeetingRequest{Request: req, Input: input, Copy: c.DeleteMeetingRequest}
}

// DeleteMeetingRequest is the request type for the
// DeleteMeeting API operation.
type DeleteMeetingRequest struct {
	*aws.Request
	Input *DeleteMeetingInput
	Copy  func(*DeleteMeetingInput) DeleteMeetingRequest
}

// Send marshals and sends the DeleteMeeting API request.
func (r DeleteMeetingRequest) Send(ctx context.Context) (*DeleteMeetingResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &DeleteMeetingResponse{
		DeleteMeetingOutput: r.Request.Data.(*DeleteMeetingOutput),
		response:            &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// DeleteMeetingResponse is the response type for the
// DeleteMeeting API operation.
type DeleteMeetingResponse struct {
	*DeleteMeetingOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// DeleteMeeting request.
// NOTE(review): "Metdata" (sic) is the generated, shipped public method name;
// do not rename here — fix belongs in the generator.
func (r *DeleteMeetingResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}

// ---- new file in this patch: service/chime/api_op_DeleteRoom.go ----

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package chime

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
	"github.com/aws/aws-sdk-go-v2/private/protocol/restjson"
)

type DeleteRoomInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime account ID.
	//
	// AccountId is a required field
	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`

	// The chat room ID.
	//
	// RoomId is a required field
	RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteRoomInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteRoomInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "DeleteRoomInput"}

	if s.AccountId == nil {
		invalidParams.Add(aws.NewErrParamRequired("AccountId"))
	}

	if s.RoomId == nil {
		invalidParams.Add(aws.NewErrParamRequired("RoomId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteRoomInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// Both identifiers are URI path parameters; the request has no body fields.
	if s.AccountId != nil {
		v := *s.AccountId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.RoomId != nil {
		v := *s.RoomId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type DeleteRoomOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteRoomOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteRoomOutput) MarshalFields(e protocol.FieldEncoder) error {
	return nil
}

const opDeleteRoom = "DeleteRoom"

// DeleteRoomRequest returns a request value for making API operation for
// Amazon Chime.
//
// Deletes a chat room.
//
// // Example sending a request using DeleteRoomRequest.
// req := client.DeleteRoomRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteRoom
func (c *Client) DeleteRoomRequest(input *DeleteRoomInput) DeleteRoomRequest {
	op := &aws.Operation{
		Name:       opDeleteRoom,
		HTTPMethod: "DELETE",
		HTTPPath:   "/accounts/{accountId}/rooms/{roomId}",
	}

	if input == nil {
		input = &DeleteRoomInput{}
	}

	req := c.newRequest(op, input, &DeleteRoomOutput{})
	// The operation returns an empty body, so replace the REST-JSON
	// unmarshaler with one that discards the payload.
	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return DeleteRoomRequest{Request: req, Input: input, Copy: c.DeleteRoomRequest}
}

// DeleteRoomRequest is the request type for the
// DeleteRoom API operation.
type DeleteRoomRequest struct {
	*aws.Request
	Input *DeleteRoomInput
	Copy  func(*DeleteRoomInput) DeleteRoomRequest
}

// Send marshals and sends the DeleteRoom API request.
func (r DeleteRoomRequest) Send(ctx context.Context) (*DeleteRoomResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &DeleteRoomResponse{
		DeleteRoomOutput: r.Request.Data.(*DeleteRoomOutput),
		response:         &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// DeleteRoomResponse is the response type for the
// DeleteRoom API operation.
type DeleteRoomResponse struct {
	*DeleteRoomOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// DeleteRoom request.
// NOTE(review): "Metdata" (sic) is the generated, shipped public method name;
// do not rename here — fix belongs in the generator.
func (r *DeleteRoomResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}

// ---- new file in this patch: service/chime/api_op_DeleteRoomMembership.go ----

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package chime

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
	"github.com/aws/aws-sdk-go-v2/private/protocol/restjson"
)

type DeleteRoomMembershipInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime account ID.
	//
	// AccountId is a required field
	AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`

	// The member ID (user ID or bot ID).
	//
	// MemberId is a required field
	MemberId *string `location:"uri" locationName:"memberId" type:"string" required:"true"`

	// The room ID.
	//
	// RoomId is a required field
	RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteRoomMembershipInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteRoomMembershipInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "DeleteRoomMembershipInput"}

	if s.AccountId == nil {
		invalidParams.Add(aws.NewErrParamRequired("AccountId"))
	}

	if s.MemberId == nil {
		invalidParams.Add(aws.NewErrParamRequired("MemberId"))
	}

	if s.RoomId == nil {
		invalidParams.Add(aws.NewErrParamRequired("RoomId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// All three identifiers are URI path parameters; the request has no body fields.
	if s.AccountId != nil {
		v := *s.AccountId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.MemberId != nil {
		v := *s.MemberId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "memberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.RoomId != nil {
		v := *s.RoomId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type DeleteRoomMembershipOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteRoomMembershipOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DeleteRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error {
	return nil
}

const opDeleteRoomMembership = "DeleteRoomMembership"

// DeleteRoomMembershipRequest returns a request value for making API operation for
// Amazon Chime.
//
// Removes a member from a chat room.
//
// // Example sending a request using DeleteRoomMembershipRequest.
// req := client.DeleteRoomMembershipRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteRoomMembership
func (c *Client) DeleteRoomMembershipRequest(input *DeleteRoomMembershipInput) DeleteRoomMembershipRequest {
	op := &aws.Operation{
		Name:       opDeleteRoomMembership,
		HTTPMethod: "DELETE",
		HTTPPath:   "/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}",
	}

	if input == nil {
		input = &DeleteRoomMembershipInput{}
	}

	req := c.newRequest(op, input, &DeleteRoomMembershipOutput{})
	// The operation returns an empty body, so replace the REST-JSON
	// unmarshaler with one that discards the payload.
	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return DeleteRoomMembershipRequest{Request: req, Input: input, Copy: c.DeleteRoomMembershipRequest}
}

// DeleteRoomMembershipRequest is the request type for the
// DeleteRoomMembership API operation.
type DeleteRoomMembershipRequest struct {
	*aws.Request
	Input *DeleteRoomMembershipInput
	Copy  func(*DeleteRoomMembershipInput) DeleteRoomMembershipRequest
}

// Send marshals and sends the DeleteRoomMembership API request.
func (r DeleteRoomMembershipRequest) Send(ctx context.Context) (*DeleteRoomMembershipResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &DeleteRoomMembershipResponse{
		DeleteRoomMembershipOutput: r.Request.Data.(*DeleteRoomMembershipOutput),
		response:                   &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// DeleteRoomMembershipResponse is the response type for the
// DeleteRoomMembership API operation.
type DeleteRoomMembershipResponse struct {
	*DeleteRoomMembershipOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// DeleteRoomMembership request.
// NOTE(review): "Metdata" (sic) is the generated, shipped public method name;
// do not rename here — fix belongs in the generator.
func (r *DeleteRoomMembershipResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}

// ---- new file in this patch: service/chime/api_op_GetAttendee.go ----

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package chime

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
)

type GetAttendeeInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime SDK attendee ID.
	//
	// AttendeeId is a required field
	AttendeeId *string `location:"uri" locationName:"attendeeId" type:"string" required:"true"`

	// The Amazon Chime SDK meeting ID.
	//
	// MeetingId is a required field
	MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"`
}

// String returns the string representation
func (s GetAttendeeInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetAttendeeInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "GetAttendeeInput"}

	if s.AttendeeId == nil {
		invalidParams.Add(aws.NewErrParamRequired("AttendeeId"))
	}

	if s.MeetingId == nil {
		invalidParams.Add(aws.NewErrParamRequired("MeetingId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s GetAttendeeInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// Both identifiers are URI path parameters; the request has no body fields.
	if s.AttendeeId != nil {
		v := *s.AttendeeId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "attendeeId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.MeetingId != nil {
		v := *s.MeetingId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type GetAttendeeOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime SDK attendee information.
	Attendee *Attendee `type:"structure"`
}

// String returns the string representation
func (s GetAttendeeOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s GetAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error {
	if s.Attendee != nil {
		v := s.Attendee

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "Attendee", v, metadata)
	}
	return nil
}

const opGetAttendee = "GetAttendee"

// GetAttendeeRequest returns a request value for making API operation for
// Amazon Chime.
//
// Gets the Amazon Chime SDK attendee details for a specified meeting ID and
// attendee ID. For more information about the Amazon Chime SDK, see Using the
// Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html)
// in the Amazon Chime Developer Guide.
//
// // Example sending a request using GetAttendeeRequest.
// req := client.GetAttendeeRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/GetAttendee
func (c *Client) GetAttendeeRequest(input *GetAttendeeInput) GetAttendeeRequest {
	op := &aws.Operation{
		Name:       opGetAttendee,
		HTTPMethod: "GET",
		HTTPPath:   "/meetings/{meetingId}/attendees/{attendeeId}",
	}

	if input == nil {
		input = &GetAttendeeInput{}
	}

	req := c.newRequest(op, input, &GetAttendeeOutput{})
	return GetAttendeeRequest{Request: req, Input: input, Copy: c.GetAttendeeRequest}
}

// GetAttendeeRequest is the request type for the
// GetAttendee API operation.
type GetAttendeeRequest struct {
	*aws.Request
	Input *GetAttendeeInput
	Copy  func(*GetAttendeeInput) GetAttendeeRequest
}

// Send marshals and sends the GetAttendee API request.
func (r GetAttendeeRequest) Send(ctx context.Context) (*GetAttendeeResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &GetAttendeeResponse{
		GetAttendeeOutput: r.Request.Data.(*GetAttendeeOutput),
		response:          &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// GetAttendeeResponse is the response type for the
// GetAttendee API operation.
type GetAttendeeResponse struct {
	*GetAttendeeOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// GetAttendee request.
//
// NOTE(review): "Metdata" (sic) is the generated, shipped public method name;
// do not rename here — fix belongs in the generator.
func (r *GetAttendeeResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}

// ---- new file in this patch: service/chime/api_op_GetMeeting.go ----

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package chime

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
)

type GetMeetingInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime SDK meeting ID.
	//
	// MeetingId is a required field
	MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"`
}

// String returns the string representation
func (s GetMeetingInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetMeetingInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "GetMeetingInput"}

	if s.MeetingId == nil {
		invalidParams.Add(aws.NewErrParamRequired("MeetingId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s GetMeetingInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	// MeetingId is a URI path parameter; the request has no body fields.
	if s.MeetingId != nil {
		v := *s.MeetingId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

type GetMeetingOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Chime SDK meeting information.
	Meeting *Meeting `type:"structure"`
}

// String returns the string representation
func (s GetMeetingOutput) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s GetMeetingOutput) MarshalFields(e protocol.FieldEncoder) error {
	if s.Meeting != nil {
		v := s.Meeting

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "Meeting", v, metadata)
	}
	return nil
}

const opGetMeeting = "GetMeeting"

// GetMeetingRequest returns a request value for making API operation for
// Amazon Chime.
//
// Gets the Amazon Chime SDK meeting details for the specified meeting ID. For
// more information about the Amazon Chime SDK, see Using the Amazon Chime SDK
// (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) in the Amazon
// Chime Developer Guide.
//
// // Example sending a request using GetMeetingRequest.
// req := client.GetMeetingRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
//     fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/GetMeeting
func (c *Client) GetMeetingRequest(input *GetMeetingInput) GetMeetingRequest {
	op := &aws.Operation{
		Name:       opGetMeeting,
		HTTPMethod: "GET",
		HTTPPath:   "/meetings/{meetingId}",
	}

	if input == nil {
		input = &GetMeetingInput{}
	}

	req := c.newRequest(op, input, &GetMeetingOutput{})
	return GetMeetingRequest{Request: req, Input: input, Copy: c.GetMeetingRequest}
}

// GetMeetingRequest is the request type for the
// GetMeeting API operation.
type GetMeetingRequest struct {
	*aws.Request
	Input *GetMeetingInput
	Copy  func(*GetMeetingInput) GetMeetingRequest
}

// Send marshals and sends the GetMeeting API request.
func (r GetMeetingRequest) Send(ctx context.Context) (*GetMeetingResponse, error) {
	r.Request.SetContext(ctx)
	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &GetMeetingResponse{
		GetMeetingOutput: r.Request.Data.(*GetMeetingOutput),
		response:         &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// GetMeetingResponse is the response type for the
// GetMeeting API operation.
+type GetMeetingResponse struct { + *GetMeetingOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetMeeting request. +func (r *GetMeetingResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_GetRoom.go b/service/chime/api_op_GetRoom.go new file mode 100644 index 00000000000..76447cc6f0d --- /dev/null +++ b/service/chime/api_op_GetRoom.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetRoomOutput struct { + _ struct{} `type:"structure"` + + // The room details. + Room *Room `type:"structure"` +} + +// String returns the string representation +func (s GetRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Room != nil { + v := s.Room + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Room", v, metadata) + } + return nil +} + +const opGetRoom = "GetRoom" + +// GetRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Retrieves room details, such as name. +// +// // Example sending a request using GetRoomRequest. 
+// req := client.GetRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/GetRoom +func (c *Client) GetRoomRequest(input *GetRoomInput) GetRoomRequest { + op := &aws.Operation{ + Name: opGetRoom, + HTTPMethod: "GET", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}", + } + + if input == nil { + input = &GetRoomInput{} + } + + req := c.newRequest(op, input, &GetRoomOutput{}) + return GetRoomRequest{Request: req, Input: input, Copy: c.GetRoomRequest} +} + +// GetRoomRequest is the request type for the +// GetRoom API operation. +type GetRoomRequest struct { + *aws.Request + Input *GetRoomInput + Copy func(*GetRoomInput) GetRoomRequest +} + +// Send marshals and sends the GetRoom API request. +func (r GetRoomRequest) Send(ctx context.Context) (*GetRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetRoomResponse{ + GetRoomOutput: r.Request.Data.(*GetRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetRoomResponse is the response type for the +// GetRoom API operation. +type GetRoomResponse struct { + *GetRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetRoom request. 
+func (r *GetRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go b/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go index c2a04f1eb24..926f163fb1f 100644 --- a/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go +++ b/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go @@ -81,8 +81,8 @@ const opGetVoiceConnectorStreamingConfiguration = "GetVoiceConnectorStreamingCon // // Retrieves the streaming configuration details for the specified Amazon Chime // Voice Connector. Shows whether media streaming is enabled for sending to -// Amazon Kinesis, and shows the retention period for the Amazon Kinesis data, -// in hours. +// Amazon Kinesis. It also shows the retention period, in hours, for the Amazon +// Kinesis data. // // // Example sending a request using GetVoiceConnectorStreamingConfigurationRequest. // req := client.GetVoiceConnectorStreamingConfigurationRequest(params) diff --git a/service/chime/api_op_InviteUsers.go b/service/chime/api_op_InviteUsers.go index 6e1bd7e7d20..f8a713acdba 100644 --- a/service/chime/api_op_InviteUsers.go +++ b/service/chime/api_op_InviteUsers.go @@ -18,7 +18,7 @@ type InviteUsersInput struct { // AccountId is a required field AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` - // The user email addresses to which to send the invite. + // The user email addresses to which to send the email invitation. // // UserEmailList is a required field UserEmailList []string `type:"list" required:"true"` @@ -75,7 +75,7 @@ func (s InviteUsersInput) MarshalFields(e protocol.FieldEncoder) error { type InviteUsersOutput struct { _ struct{} `type:"structure"` - // The invite details. + // The email invitation details. 
Invites []Invite `type:"list"` } @@ -106,9 +106,9 @@ const opInviteUsers = "InviteUsers" // InviteUsersRequest returns a request value for making API operation for // Amazon Chime. // -// Sends email invites to as many as 50 users, inviting them to the specified -// Amazon Chime Team account. Only Team account types are currently supported -// for this action. +// Sends email to a maximum of 50 users, inviting them to the specified Amazon +// Chime Team account. Only Team account types are currently supported for this +// action. // // // Example sending a request using InviteUsersRequest. // req := client.InviteUsersRequest(params) diff --git a/service/chime/api_op_ListAttendees.go b/service/chime/api_op_ListAttendees.go new file mode 100644 index 00000000000..2c596a9ee6b --- /dev/null +++ b/service/chime/api_op_ListAttendees.go @@ -0,0 +1,234 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListAttendeesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` + + // The token to use to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListAttendeesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAttendeesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAttendeesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListAttendeesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListAttendeesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee information. + Attendees []Attendee `type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAttendeesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListAttendeesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendees != nil { + v := s.Attendees + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Attendees", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListAttendees = "ListAttendees" + +// ListAttendeesRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists the attendees for the specified Amazon Chime SDK meeting. For more +// information about the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using ListAttendeesRequest. +// req := client.ListAttendeesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListAttendees +func (c *Client) ListAttendeesRequest(input *ListAttendeesInput) ListAttendeesRequest { + op := &aws.Operation{ + Name: opListAttendees, + HTTPMethod: "GET", + HTTPPath: "/meetings/{meetingId}/attendees", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAttendeesInput{} + } + + req := c.newRequest(op, input, &ListAttendeesOutput{}) + return ListAttendeesRequest{Request: req, Input: input, Copy: c.ListAttendeesRequest} +} + +// ListAttendeesRequest is the request type for the +// ListAttendees API operation. 
+type ListAttendeesRequest struct {
+	*aws.Request
+	Input *ListAttendeesInput
+	Copy  func(*ListAttendeesInput) ListAttendeesRequest
+}
+
+// Send marshals and sends the ListAttendees API request.
+func (r ListAttendeesRequest) Send(ctx context.Context) (*ListAttendeesResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &ListAttendeesResponse{
+		ListAttendeesOutput: r.Request.Data.(*ListAttendeesOutput),
+		response:            &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewListAttendeesPaginator returns a paginator for ListAttendees.
+// Use Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false, if there are
+// no more pages, or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//   // Example iterating over pages.
+//   req := client.ListAttendeesRequest(input)
+//   p := chime.NewListAttendeesPaginator(req)
+//
+//   for p.Next(context.TODO()) {
+//       page := p.CurrentPage()
+//   }
+//
+//   if err := p.Err(); err != nil {
+//       return err
+//   }
+//
+func NewListAttendeesPaginator(req ListAttendeesRequest) ListAttendeesPaginator {
+	return ListAttendeesPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *ListAttendeesInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// ListAttendeesPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListAttendeesPaginator struct {
+	aws.Pager
+}
+
+func (p *ListAttendeesPaginator) CurrentPage() *ListAttendeesOutput {
+	return p.Pager.CurrentPage().(*ListAttendeesOutput)
+}
+
+// ListAttendeesResponse is the response type for the
+// ListAttendees API operation.
+type ListAttendeesResponse struct { + *ListAttendeesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAttendees request. +func (r *ListAttendeesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_ListBots.go b/service/chime/api_op_ListBots.go index 9d15a279c1d..2c04f7439b9 100644 --- a/service/chime/api_op_ListBots.go +++ b/service/chime/api_op_ListBots.go @@ -18,7 +18,8 @@ type ListBotsInput struct { // AccountId is a required field AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` - // The maximum number of results to return in a single call. Default is 10. + // The maximum number of results to return in a single call. The default is + // 10. MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` // The token to use to retrieve the next page of results. diff --git a/service/chime/api_op_ListMeetings.go b/service/chime/api_op_ListMeetings.go new file mode 100644 index 00000000000..5f784e5cb4e --- /dev/null +++ b/service/chime/api_op_ListMeetings.go @@ -0,0 +1,219 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListMeetingsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. 
+ NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListMeetingsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMeetingsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListMeetingsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListMeetingsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListMeetingsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK meeting information. + Meetings []Meeting `type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMeetingsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListMeetingsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Meetings != nil { + v := s.Meetings + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Meetings", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListMeetings = "ListMeetings" + +// ListMeetingsRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists up to 100 active Amazon Chime SDK meetings. For more information about +// the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using ListMeetingsRequest. +// req := client.ListMeetingsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListMeetings +func (c *Client) ListMeetingsRequest(input *ListMeetingsInput) ListMeetingsRequest { + op := &aws.Operation{ + Name: opListMeetings, + HTTPMethod: "GET", + HTTPPath: "/meetings", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMeetingsInput{} + } + + req := c.newRequest(op, input, &ListMeetingsOutput{}) + return ListMeetingsRequest{Request: req, Input: input, Copy: c.ListMeetingsRequest} +} + +// ListMeetingsRequest is the request type for the +// ListMeetings API operation. 
+type ListMeetingsRequest struct {
+	*aws.Request
+	Input *ListMeetingsInput
+	Copy  func(*ListMeetingsInput) ListMeetingsRequest
+}
+
+// Send marshals and sends the ListMeetings API request.
+func (r ListMeetingsRequest) Send(ctx context.Context) (*ListMeetingsResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &ListMeetingsResponse{
+		ListMeetingsOutput: r.Request.Data.(*ListMeetingsOutput),
+		response:           &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewListMeetingsPaginator returns a paginator for ListMeetings.
+// Use Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false, if there are
+// no more pages, or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//   // Example iterating over pages.
+//   req := client.ListMeetingsRequest(input)
+//   p := chime.NewListMeetingsPaginator(req)
+//
+//   for p.Next(context.TODO()) {
+//       page := p.CurrentPage()
+//   }
+//
+//   if err := p.Err(); err != nil {
+//       return err
+//   }
+//
+func NewListMeetingsPaginator(req ListMeetingsRequest) ListMeetingsPaginator {
+	return ListMeetingsPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *ListMeetingsInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// ListMeetingsPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListMeetingsPaginator struct {
+	aws.Pager
+}
+
+func (p *ListMeetingsPaginator) CurrentPage() *ListMeetingsOutput {
+	return p.Pager.CurrentPage().(*ListMeetingsOutput)
+}
+
+// ListMeetingsResponse is the response type for the
+// ListMeetings API operation.
+type ListMeetingsResponse struct { + *ListMeetingsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListMeetings request. +func (r *ListMeetingsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_ListRoomMemberships.go b/service/chime/api_op_ListRoomMemberships.go new file mode 100644 index 00000000000..e8d01e29468 --- /dev/null +++ b/service/chime/api_op_ListRoomMemberships.go @@ -0,0 +1,248 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRoomMembershipsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRoomMembershipsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListRoomMembershipsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRoomMembershipsInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRoomMembershipsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListRoomMembershipsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` + + // The room membership details. 
+ RoomMemberships []RoomMembership `type:"list"` +} + +// String returns the string representation +func (s ListRoomMembershipsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRoomMembershipsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomMemberships != nil { + v := s.RoomMemberships + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RoomMemberships", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListRoomMemberships = "ListRoomMemberships" + +// ListRoomMembershipsRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists the membership details for the specified room, such as member IDs, +// member email addresses, and member names. +// +// // Example sending a request using ListRoomMembershipsRequest. 
+//    req := client.ListRoomMembershipsRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListRoomMemberships
+func (c *Client) ListRoomMembershipsRequest(input *ListRoomMembershipsInput) ListRoomMembershipsRequest {
+	op := &aws.Operation{
+		Name:       opListRoomMemberships,
+		HTTPMethod: "GET",
+		HTTPPath:   "/accounts/{accountId}/rooms/{roomId}/memberships",
+		Paginator: &aws.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListRoomMembershipsInput{}
+	}
+
+	req := c.newRequest(op, input, &ListRoomMembershipsOutput{})
+	return ListRoomMembershipsRequest{Request: req, Input: input, Copy: c.ListRoomMembershipsRequest}
+}
+
+// ListRoomMembershipsRequest is the request type for the
+// ListRoomMemberships API operation.
+type ListRoomMembershipsRequest struct {
+	*aws.Request
+	Input *ListRoomMembershipsInput
+	Copy  func(*ListRoomMembershipsInput) ListRoomMembershipsRequest
+}
+
+// Send marshals and sends the ListRoomMemberships API request.
+func (r ListRoomMembershipsRequest) Send(ctx context.Context) (*ListRoomMembershipsResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &ListRoomMembershipsResponse{
+		ListRoomMembershipsOutput: r.Request.Data.(*ListRoomMembershipsOutput),
+		response:                  &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewListRoomMembershipsPaginator returns a paginator for ListRoomMemberships.
+// Use Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false, if there are
+// no more pages, or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//   // Example iterating over pages.
+//   req := client.ListRoomMembershipsRequest(input)
+//   p := chime.NewListRoomMembershipsPaginator(req)
+//
+//   for p.Next(context.TODO()) {
+//       page := p.CurrentPage()
+//   }
+//
+//   if err := p.Err(); err != nil {
+//       return err
+//   }
+//
+func NewListRoomMembershipsPaginator(req ListRoomMembershipsRequest) ListRoomMembershipsPaginator {
+	return ListRoomMembershipsPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *ListRoomMembershipsInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// ListRoomMembershipsPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListRoomMembershipsPaginator struct {
+	aws.Pager
+}
+
+func (p *ListRoomMembershipsPaginator) CurrentPage() *ListRoomMembershipsOutput {
+	return p.Pager.CurrentPage().(*ListRoomMembershipsOutput)
+}
+
+// ListRoomMembershipsResponse is the response type for the
+// ListRoomMemberships API operation.
+type ListRoomMembershipsResponse struct {
+	*ListRoomMembershipsOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListRoomMemberships request.
+func (r *ListRoomMembershipsResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/service/chime/api_op_ListRooms.go b/service/chime/api_op_ListRooms.go
new file mode 100644
index 00000000000..3f0236e5e9d
--- /dev/null
+++ b/service/chime/api_op_ListRooms.go
@@ -0,0 +1,243 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRoomsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The member ID (user ID or bot ID). + MemberId *string `location:"querystring" locationName:"member-id" type:"string"` + + // The token to use to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListRoomsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRoomsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRoomsInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListRoomsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "member-id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListRoomsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` + + // The room details. + Rooms []Room `type:"list"` +} + +// String returns the string representation +func (s ListRoomsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListRoomsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Rooms != nil { + v := s.Rooms + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Rooms", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListRooms = "ListRooms" + +// ListRoomsRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists the room details for the specified Amazon Chime account. Optionally, +// filter the results by a member ID (user ID or bot ID) to see a list of rooms +// that the member belongs to. +// +// // Example sending a request using ListRoomsRequest. +// req := client.ListRoomsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListRooms +func (c *Client) ListRoomsRequest(input *ListRoomsInput) ListRoomsRequest { + op := &aws.Operation{ + Name: opListRooms, + HTTPMethod: "GET", + HTTPPath: "/accounts/{accountId}/rooms", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRoomsInput{} + } + + req := c.newRequest(op, input, &ListRoomsOutput{}) + return ListRoomsRequest{Request: req, Input: input, Copy: c.ListRoomsRequest} +} + +// ListRoomsRequest is the request type for the +// ListRooms API operation. +type ListRoomsRequest struct { + *aws.Request + Input *ListRoomsInput + Copy func(*ListRoomsInput) ListRoomsRequest +} + +// Send marshals and sends the ListRooms API request. 
+func (r ListRoomsRequest) Send(ctx context.Context) (*ListRoomsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRoomsResponse{ + ListRoomsOutput: r.Request.Data.(*ListRoomsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRoomsRequestPaginator returns a paginator for ListRooms. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRoomsRequest(input) +// p := chime.NewListRoomsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRoomsPaginator(req ListRoomsRequest) ListRoomsPaginator { + return ListRoomsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRoomsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRoomsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListRoomsPaginator struct { + aws.Pager +} + +func (p *ListRoomsPaginator) CurrentPage() *ListRoomsOutput { + return p.Pager.CurrentPage().(*ListRoomsOutput) +} + +// ListRoomsResponse is the response type for the +// ListRooms API operation. +type ListRoomsResponse struct { + *ListRoomsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRooms request. 
+func (r *ListRoomsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go b/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go index 894747f42cd..d274169e272 100644 --- a/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go +++ b/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go @@ -101,8 +101,8 @@ const opPutVoiceConnectorStreamingConfiguration = "PutVoiceConnectorStreamingCon // // Adds a streaming configuration for the specified Amazon Chime Voice Connector. // The streaming configuration specifies whether media streaming is enabled -// for sending to Amazon Kinesis, and sets the retention period for the Amazon -// Kinesis data, in hours. +// for sending to Amazon Kinesis. It also sets the retention period, in hours, +// for the Amazon Kinesis data. // // // Example sending a request using PutVoiceConnectorStreamingConfigurationRequest. // req := client.PutVoiceConnectorStreamingConfigurationRequest(params) diff --git a/service/chime/api_op_UpdatePhoneNumberSettings.go b/service/chime/api_op_UpdatePhoneNumberSettings.go index 999f21b9ab0..579c24440d6 100644 --- a/service/chime/api_op_UpdatePhoneNumberSettings.go +++ b/service/chime/api_op_UpdatePhoneNumberSettings.go @@ -74,7 +74,7 @@ const opUpdatePhoneNumberSettings = "UpdatePhoneNumberSettings" // Updates the phone number settings for the administrator's AWS account, such // as the default outbound calling name. You can update the default outbound // calling name once every seven days. Outbound calling names can take up to -// 72 hours to be updated. +// 72 hours to update. // // // Example sending a request using UpdatePhoneNumberSettingsRequest. 
// req := client.UpdatePhoneNumberSettingsRequest(params) diff --git a/service/chime/api_op_UpdateRoom.go b/service/chime/api_op_UpdateRoom.go new file mode 100644 index 00000000000..232ecb33e8b --- /dev/null +++ b/service/chime/api_op_UpdateRoom.go @@ -0,0 +1,167 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The room name. + Name *string `type:"string" sensitive:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateRoomOutput struct { + _ struct{} `type:"structure"` + + // The room details. + Room *Room `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Room != nil { + v := s.Room + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Room", v, metadata) + } + return nil +} + +const opUpdateRoom = "UpdateRoom" + +// UpdateRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Updates room details, such as the room name. +// +// // Example sending a request using UpdateRoomRequest. 
+// req := client.UpdateRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/UpdateRoom +func (c *Client) UpdateRoomRequest(input *UpdateRoomInput) UpdateRoomRequest { + op := &aws.Operation{ + Name: opUpdateRoom, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}", + } + + if input == nil { + input = &UpdateRoomInput{} + } + + req := c.newRequest(op, input, &UpdateRoomOutput{}) + return UpdateRoomRequest{Request: req, Input: input, Copy: c.UpdateRoomRequest} +} + +// UpdateRoomRequest is the request type for the +// UpdateRoom API operation. +type UpdateRoomRequest struct { + *aws.Request + Input *UpdateRoomInput + Copy func(*UpdateRoomInput) UpdateRoomRequest +} + +// Send marshals and sends the UpdateRoom API request. +func (r UpdateRoomRequest) Send(ctx context.Context) (*UpdateRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRoomResponse{ + UpdateRoomOutput: r.Request.Data.(*UpdateRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRoomResponse is the response type for the +// UpdateRoom API operation. +type UpdateRoomResponse struct { + *UpdateRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRoom request. +func (r *UpdateRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_UpdateRoomMembership.go b/service/chime/api_op_UpdateRoomMembership.go new file mode 100644 index 00000000000..0c0018b8714 --- /dev/null +++ b/service/chime/api_op_UpdateRoomMembership.go @@ -0,0 +1,184 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The member ID. + // + // MemberId is a required field + MemberId *string `location:"uri" locationName:"memberId" type:"string" required:"true"` + + // The role of the member. + Role RoomMembershipRole `type:"string" enum:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MemberId == nil { + invalidParams.Add(aws.NewErrParamRequired("MemberId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "memberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateRoomMembershipOutput struct { + _ struct{} `type:"structure"` + + // The room membership details. + RoomMembership *RoomMembership `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoomMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RoomMembership != nil { + v := s.RoomMembership + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RoomMembership", v, metadata) + } + return nil +} + +const opUpdateRoomMembership = "UpdateRoomMembership" + +// UpdateRoomMembershipRequest returns a request value for making API operation for +// Amazon Chime. +// +// Updates room membership details, such as member role. The member role designates +// whether the member is a chat room administrator or a general chat room member. 
+// Member role can only be updated for user IDs. +// +// // Example sending a request using UpdateRoomMembershipRequest. +// req := client.UpdateRoomMembershipRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/UpdateRoomMembership +func (c *Client) UpdateRoomMembershipRequest(input *UpdateRoomMembershipInput) UpdateRoomMembershipRequest { + op := &aws.Operation{ + Name: opUpdateRoomMembership, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + } + + if input == nil { + input = &UpdateRoomMembershipInput{} + } + + req := c.newRequest(op, input, &UpdateRoomMembershipOutput{}) + return UpdateRoomMembershipRequest{Request: req, Input: input, Copy: c.UpdateRoomMembershipRequest} +} + +// UpdateRoomMembershipRequest is the request type for the +// UpdateRoomMembership API operation. +type UpdateRoomMembershipRequest struct { + *aws.Request + Input *UpdateRoomMembershipInput + Copy func(*UpdateRoomMembershipInput) UpdateRoomMembershipRequest +} + +// Send marshals and sends the UpdateRoomMembership API request. +func (r UpdateRoomMembershipRequest) Send(ctx context.Context) (*UpdateRoomMembershipResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRoomMembershipResponse{ + UpdateRoomMembershipOutput: r.Request.Data.(*UpdateRoomMembershipOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRoomMembershipResponse is the response type for the +// UpdateRoomMembership API operation. +type UpdateRoomMembershipResponse struct { + *UpdateRoomMembershipOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRoomMembership request. 
+func (r *UpdateRoomMembershipResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_types.go b/service/chime/api_types.go index 850d4f97538..4140162ef09 100644 --- a/service/chime/api_types.go +++ b/service/chime/api_types.go @@ -147,6 +147,56 @@ func (s AccountSettings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// An Amazon Chime SDK meeting attendee. Includes a unique AttendeeId and JoinToken. +// The JoinToken allows a client to authenticate and join as the specified attendee. +// The JoinToken expires when the meeting ends or when DeleteAttendee is called. +// After that, the attendee is unable to join the meeting. +// +// We recommend securely transferring each JoinToken from your server application +// to the client so that no other client has access to the token except for +// the one authorized to represent the attendee. +type Attendee struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee ID. + AttendeeId *string `type:"string"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + ExternalUserId *string `min:"2" type:"string" sensitive:"true"` + + // The join token used by the Amazon Chime SDK attendee. + JoinToken *string `min:"2" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s Attendee) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Attendee) MarshalFields(e protocol.FieldEncoder) error { + if s.AttendeeId != nil { + v := *s.AttendeeId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AttendeeId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.JoinToken != nil { + v := *s.JoinToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "JoinToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // A resource that allows Enterprise account administrators to configure an // interface to receive events from Amazon Chime. type Bot struct { @@ -272,6 +322,96 @@ func (s BusinessCallingSettings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The list of errors returned when errors are encountered during the BatchCreateAttendee +// and CreateAttendee actions. This includes external user IDs, error codes, +// and error messages. +type CreateAttendeeError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The error message. + ErrorMessage *string `type:"string"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + ExternalUserId *string `min:"2" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s CreateAttendeeError) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateAttendeeError) MarshalFields(e protocol.FieldEncoder) error { + if s.ErrorCode != nil { + v := *s.ErrorCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ErrorMessage != nil { + v := *s.ErrorMessage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorMessage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee +// action. +type CreateAttendeeRequestItem struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + // + // ExternalUserId is a required field + ExternalUserId *string `min:"2" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s CreateAttendeeRequestItem) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAttendeeRequestItem) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateAttendeeRequestItem"} + + if s.ExternalUserId == nil { + invalidParams.Add(aws.NewErrParamRequired("ExternalUserId")) + } + if s.ExternalUserId != nil && len(*s.ExternalUserId) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ExternalUserId", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateAttendeeRequestItem) MarshalFields(e protocol.FieldEncoder) error { + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // The SIP credentials used to authenticate requests to your Amazon Chime Voice // Connector. type Credential struct { @@ -429,6 +569,303 @@ func (s LoggingConfiguration) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A set of endpoints used by clients to connect to the media service group +// for a Amazon Chime SDK meeting. +type MediaPlacement struct { + _ struct{} `type:"structure"` + + // The audio host URL. + AudioHostUrl *string `type:"string"` + + // The screen data URL. + ScreenDataUrl *string `type:"string"` + + // The screen sharing URL. + ScreenSharingUrl *string `type:"string"` + + // The screen viewing URL. + ScreenViewingUrl *string `type:"string"` + + // The signaling URL. + SignalingUrl *string `type:"string"` + + // The turn control URL. + TurnControlUrl *string `type:"string"` +} + +// String returns the string representation +func (s MediaPlacement) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s MediaPlacement) MarshalFields(e protocol.FieldEncoder) error { + if s.AudioHostUrl != nil { + v := *s.AudioHostUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AudioHostUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ScreenDataUrl != nil { + v := *s.ScreenDataUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ScreenDataUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ScreenSharingUrl != nil { + v := *s.ScreenSharingUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ScreenSharingUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ScreenViewingUrl != nil { + v := *s.ScreenViewingUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ScreenViewingUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SignalingUrl != nil { + v := *s.SignalingUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SignalingUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TurnControlUrl != nil { + v := *s.TurnControlUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TurnControlUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A meeting created using the Amazon Chime SDK. +type Meeting struct { + _ struct{} `type:"structure"` + + // The media placement for the meeting. + MediaPlacement *MediaPlacement `type:"structure"` + + // The Region in which to create the meeting. Available values: us-east-1, us-west-2. + MediaRegion *string `type:"string"` + + // The Amazon Chime SDK meeting ID. 
+ MeetingId *string `type:"string"` +} + +// String returns the string representation +func (s Meeting) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Meeting) MarshalFields(e protocol.FieldEncoder) error { + if s.MediaPlacement != nil { + v := s.MediaPlacement + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "MediaPlacement", v, metadata) + } + if s.MediaRegion != nil { + v := *s.MediaRegion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MediaRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MeetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The configuration for resource targets to receive notifications when Amazon +// Chime SDK meeting and attendee events occur. +type MeetingNotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The SNS topic ARN. + SnsTopicArn *string `min:"1" type:"string" sensitive:"true"` + + // The SQS queue ARN. + SqsQueueArn *string `min:"1" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s MeetingNotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MeetingNotificationConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "MeetingNotificationConfiguration"} + if s.SnsTopicArn != nil && len(*s.SnsTopicArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SnsTopicArn", 1)) + } + if s.SqsQueueArn != nil && len(*s.SqsQueueArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SqsQueueArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MeetingNotificationConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.SnsTopicArn != nil { + v := *s.SnsTopicArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SnsTopicArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SqsQueueArn != nil { + v := *s.SqsQueueArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SqsQueueArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The member details, such as email address, name, member ID, and member type. +type Member struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + AccountId *string `type:"string"` + + // The member email address. + Email *string `type:"string" sensitive:"true"` + + // The member name. + FullName *string `type:"string" sensitive:"true"` + + // The member ID (user ID or bot ID). + MemberId *string `type:"string"` + + // The member type. + MemberType MemberType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s Member) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Member) MarshalFields(e protocol.FieldEncoder) error { + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Email != nil { + v := *s.Email + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Email", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.FullName != nil { + v := *s.FullName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FullName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.MemberType) > 0 { + v := s.MemberType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// The list of errors returned when a member action results in an error. +type MemberError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode ErrorCode `type:"string" enum:"true"` + + // The error message. + ErrorMessage *string `type:"string"` + + // The member ID. + MemberId *string `type:"string"` +} + +// String returns the string representation +func (s MemberError) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s MemberError) MarshalFields(e protocol.FieldEncoder) error { + if len(s.ErrorCode) > 0 { + v := s.ErrorCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorCode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.ErrorMessage != nil { + v := *s.ErrorMessage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorMessage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Membership details, such as member ID and member role. +type MembershipItem struct { + _ struct{} `type:"structure"` + + // The member ID. + MemberId *string `type:"string"` + + // The member role. + Role RoomMembershipRole `type:"string" enum:"true"` +} + +// String returns the string representation +func (s MembershipItem) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MembershipItem) MarshalFields(e protocol.FieldEncoder) error { + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + // A phone number for which an order has been placed. type OrderedPhoneNumber struct { _ struct{} `type:"structure"` @@ -973,13 +1410,145 @@ func (s PhoneNumberOrder) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The Amazon Chime chat room details. +type Room struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. 
+ AccountId *string `type:"string"` + + // The identifier of the room creator. + CreatedBy *string `type:"string"` + + // The room creation timestamp, in ISO 8601 format. + CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The room name. + Name *string `type:"string" sensitive:"true"` + + // The room ID. + RoomId *string `type:"string"` + + // The room update timestamp, in ISO 8601 format. + UpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Room) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Room) MarshalFields(e protocol.FieldEncoder) error { + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedBy != nil { + v := *s.CreatedBy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedBy", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTimestamp != nil { + v := *s.CreatedTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTimestamp", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RoomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedTimestamp != nil { + v := *s.UpdatedTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedTimestamp", + protocol.TimeValue{V: v, Format: "iso8601", 
QuotedFormatTime: true}, metadata) + } + return nil +} + +// The room membership details. +type RoomMembership struct { + _ struct{} `type:"structure"` + + // The identifier of the user that invited the room member. + InvitedBy *string `type:"string"` + + // The member details, such as email address, name, member ID, and member type. + Member *Member `type:"structure"` + + // The membership role. + Role RoomMembershipRole `type:"string" enum:"true"` + + // The room ID. + RoomId *string `type:"string"` + + // The room membership update timestamp, in ISO 8601 format. + UpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s RoomMembership) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RoomMembership) MarshalFields(e protocol.FieldEncoder) error { + if s.InvitedBy != nil { + v := *s.InvitedBy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InvitedBy", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Member != nil { + v := s.Member + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Member", v, metadata) + } + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RoomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedTimestamp != nil { + v := *s.UpdatedTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedTimestamp", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + // The streaming configuration associated with an Amazon Chime Voice Connector. 
// Specifies whether media streaming is enabled for sending to Amazon Kinesis, // and shows the retention period for the Amazon Kinesis data, in hours. type StreamingConfiguration struct { _ struct{} `type:"structure"` - // The retention period for the Amazon Kinesis data, in hours. + // The retention period, in hours, for the Amazon Kinesis data. // // DataRetentionInHours is a required field DataRetentionInHours *int64 `type:"integer" required:"true"` diff --git a/service/chime/chimeiface/interface.go b/service/chime/chimeiface/interface.go index 8404cb474b6..0b6f10ffcd0 100644 --- a/service/chime/chimeiface/interface.go +++ b/service/chime/chimeiface/interface.go @@ -67,6 +67,10 @@ type ClientAPI interface { AssociatePhoneNumbersWithVoiceConnectorGroupRequest(*chime.AssociatePhoneNumbersWithVoiceConnectorGroupInput) chime.AssociatePhoneNumbersWithVoiceConnectorGroupRequest + BatchCreateAttendeeRequest(*chime.BatchCreateAttendeeInput) chime.BatchCreateAttendeeRequest + + BatchCreateRoomMembershipRequest(*chime.BatchCreateRoomMembershipInput) chime.BatchCreateRoomMembershipRequest + BatchDeletePhoneNumberRequest(*chime.BatchDeletePhoneNumberInput) chime.BatchDeletePhoneNumberRequest BatchSuspendUserRequest(*chime.BatchSuspendUserInput) chime.BatchSuspendUserRequest @@ -79,20 +83,36 @@ type ClientAPI interface { CreateAccountRequest(*chime.CreateAccountInput) chime.CreateAccountRequest + CreateAttendeeRequest(*chime.CreateAttendeeInput) chime.CreateAttendeeRequest + CreateBotRequest(*chime.CreateBotInput) chime.CreateBotRequest + CreateMeetingRequest(*chime.CreateMeetingInput) chime.CreateMeetingRequest + CreatePhoneNumberOrderRequest(*chime.CreatePhoneNumberOrderInput) chime.CreatePhoneNumberOrderRequest + CreateRoomRequest(*chime.CreateRoomInput) chime.CreateRoomRequest + + CreateRoomMembershipRequest(*chime.CreateRoomMembershipInput) chime.CreateRoomMembershipRequest + CreateVoiceConnectorRequest(*chime.CreateVoiceConnectorInput) 
chime.CreateVoiceConnectorRequest CreateVoiceConnectorGroupRequest(*chime.CreateVoiceConnectorGroupInput) chime.CreateVoiceConnectorGroupRequest DeleteAccountRequest(*chime.DeleteAccountInput) chime.DeleteAccountRequest + DeleteAttendeeRequest(*chime.DeleteAttendeeInput) chime.DeleteAttendeeRequest + DeleteEventsConfigurationRequest(*chime.DeleteEventsConfigurationInput) chime.DeleteEventsConfigurationRequest + DeleteMeetingRequest(*chime.DeleteMeetingInput) chime.DeleteMeetingRequest + DeletePhoneNumberRequest(*chime.DeletePhoneNumberInput) chime.DeletePhoneNumberRequest + DeleteRoomRequest(*chime.DeleteRoomInput) chime.DeleteRoomRequest + + DeleteRoomMembershipRequest(*chime.DeleteRoomMembershipInput) chime.DeleteRoomMembershipRequest + DeleteVoiceConnectorRequest(*chime.DeleteVoiceConnectorInput) chime.DeleteVoiceConnectorRequest DeleteVoiceConnectorGroupRequest(*chime.DeleteVoiceConnectorGroupInput) chime.DeleteVoiceConnectorGroupRequest @@ -115,18 +135,24 @@ type ClientAPI interface { GetAccountSettingsRequest(*chime.GetAccountSettingsInput) chime.GetAccountSettingsRequest + GetAttendeeRequest(*chime.GetAttendeeInput) chime.GetAttendeeRequest + GetBotRequest(*chime.GetBotInput) chime.GetBotRequest GetEventsConfigurationRequest(*chime.GetEventsConfigurationInput) chime.GetEventsConfigurationRequest GetGlobalSettingsRequest(*chime.GetGlobalSettingsInput) chime.GetGlobalSettingsRequest + GetMeetingRequest(*chime.GetMeetingInput) chime.GetMeetingRequest + GetPhoneNumberRequest(*chime.GetPhoneNumberInput) chime.GetPhoneNumberRequest GetPhoneNumberOrderRequest(*chime.GetPhoneNumberOrderInput) chime.GetPhoneNumberOrderRequest GetPhoneNumberSettingsRequest(*chime.GetPhoneNumberSettingsInput) chime.GetPhoneNumberSettingsRequest + GetRoomRequest(*chime.GetRoomInput) chime.GetRoomRequest + GetUserRequest(*chime.GetUserInput) chime.GetUserRequest GetUserSettingsRequest(*chime.GetUserSettingsInput) chime.GetUserSettingsRequest @@ -149,12 +175,20 @@ type ClientAPI interface 
{ ListAccountsRequest(*chime.ListAccountsInput) chime.ListAccountsRequest + ListAttendeesRequest(*chime.ListAttendeesInput) chime.ListAttendeesRequest + ListBotsRequest(*chime.ListBotsInput) chime.ListBotsRequest + ListMeetingsRequest(*chime.ListMeetingsInput) chime.ListMeetingsRequest + ListPhoneNumberOrdersRequest(*chime.ListPhoneNumberOrdersInput) chime.ListPhoneNumberOrdersRequest ListPhoneNumbersRequest(*chime.ListPhoneNumbersInput) chime.ListPhoneNumbersRequest + ListRoomMembershipsRequest(*chime.ListRoomMembershipsInput) chime.ListRoomMembershipsRequest + + ListRoomsRequest(*chime.ListRoomsInput) chime.ListRoomsRequest + ListUsersRequest(*chime.ListUsersInput) chime.ListUsersRequest ListVoiceConnectorGroupsRequest(*chime.ListVoiceConnectorGroupsInput) chime.ListVoiceConnectorGroupsRequest @@ -197,6 +231,10 @@ type ClientAPI interface { UpdatePhoneNumberSettingsRequest(*chime.UpdatePhoneNumberSettingsInput) chime.UpdatePhoneNumberSettingsRequest + UpdateRoomRequest(*chime.UpdateRoomInput) chime.UpdateRoomRequest + + UpdateRoomMembershipRequest(*chime.UpdateRoomMembershipInput) chime.UpdateRoomMembershipRequest + UpdateUserRequest(*chime.UpdateUserInput) chime.UpdateUserRequest UpdateUserSettingsRequest(*chime.UpdateUserSettingsInput) chime.UpdateUserSettingsRequest diff --git a/service/cloudformation/api_enums.go b/service/cloudformation/api_enums.go index 7acae068116..642897d4d40 100644 --- a/service/cloudformation/api_enums.go +++ b/service/cloudformation/api_enums.go @@ -131,6 +131,23 @@ func (enum ChangeType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type DeprecatedStatus string + +// Enum values for DeprecatedStatus +const ( + DeprecatedStatusLive DeprecatedStatus = "LIVE" + DeprecatedStatusDeprecated DeprecatedStatus = "DEPRECATED" +) + +func (enum DeprecatedStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DeprecatedStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + 
return append(b, enum...), nil +} + type DifferenceType string // Enum values for DifferenceType @@ -187,6 +204,35 @@ func (enum ExecutionStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type HandlerErrorCode string + +// Enum values for HandlerErrorCode +const ( + HandlerErrorCodeNotUpdatable HandlerErrorCode = "NotUpdatable" + HandlerErrorCodeInvalidRequest HandlerErrorCode = "InvalidRequest" + HandlerErrorCodeAccessDenied HandlerErrorCode = "AccessDenied" + HandlerErrorCodeInvalidCredentials HandlerErrorCode = "InvalidCredentials" + HandlerErrorCodeAlreadyExists HandlerErrorCode = "AlreadyExists" + HandlerErrorCodeNotFound HandlerErrorCode = "NotFound" + HandlerErrorCodeResourceConflict HandlerErrorCode = "ResourceConflict" + HandlerErrorCodeThrottling HandlerErrorCode = "Throttling" + HandlerErrorCodeServiceLimitExceeded HandlerErrorCode = "ServiceLimitExceeded" + HandlerErrorCodeNotStabilized HandlerErrorCode = "NotStabilized" + HandlerErrorCodeGeneralServiceException HandlerErrorCode = "GeneralServiceException" + HandlerErrorCodeServiceInternalError HandlerErrorCode = "ServiceInternalError" + HandlerErrorCodeNetworkFailure HandlerErrorCode = "NetworkFailure" + HandlerErrorCodeInternalFailure HandlerErrorCode = "InternalFailure" +) + +func (enum HandlerErrorCode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum HandlerErrorCode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type OnFailure string // Enum values for OnFailure @@ -205,6 +251,77 @@ func (enum OnFailure) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type OperationStatus string + +// Enum values for OperationStatus +const ( + OperationStatusPending OperationStatus = "PENDING" + OperationStatusInProgress OperationStatus = "IN_PROGRESS" + OperationStatusSuccess OperationStatus = "SUCCESS" + OperationStatusFailed OperationStatus = "FAILED" +) + +func (enum 
OperationStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum OperationStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ProvisioningType string + +// Enum values for ProvisioningType +const ( + ProvisioningTypeNonProvisionable ProvisioningType = "NON_PROVISIONABLE" + ProvisioningTypeImmutable ProvisioningType = "IMMUTABLE" + ProvisioningTypeFullyMutable ProvisioningType = "FULLY_MUTABLE" +) + +func (enum ProvisioningType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ProvisioningType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type RegistrationStatus string + +// Enum values for RegistrationStatus +const ( + RegistrationStatusComplete RegistrationStatus = "COMPLETE" + RegistrationStatusInProgress RegistrationStatus = "IN_PROGRESS" + RegistrationStatusFailed RegistrationStatus = "FAILED" +) + +func (enum RegistrationStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RegistrationStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type RegistryType string + +// Enum values for RegistryType +const ( + RegistryTypeResource RegistryType = "RESOURCE" +) + +func (enum RegistryType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RegistryType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type Replacement string // Enum values for Replacement @@ -384,13 +501,52 @@ func (enum StackResourceDriftStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type StackSetDriftDetectionStatus string + +// Enum values for StackSetDriftDetectionStatus +const ( + StackSetDriftDetectionStatusCompleted StackSetDriftDetectionStatus = "COMPLETED" + StackSetDriftDetectionStatusFailed StackSetDriftDetectionStatus = "FAILED" + 
StackSetDriftDetectionStatusPartialSuccess StackSetDriftDetectionStatus = "PARTIAL_SUCCESS" + StackSetDriftDetectionStatusInProgress StackSetDriftDetectionStatus = "IN_PROGRESS" + StackSetDriftDetectionStatusStopped StackSetDriftDetectionStatus = "STOPPED" +) + +func (enum StackSetDriftDetectionStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum StackSetDriftDetectionStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type StackSetDriftStatus string + +// Enum values for StackSetDriftStatus +const ( + StackSetDriftStatusDrifted StackSetDriftStatus = "DRIFTED" + StackSetDriftStatusInSync StackSetDriftStatus = "IN_SYNC" + StackSetDriftStatusNotChecked StackSetDriftStatus = "NOT_CHECKED" +) + +func (enum StackSetDriftStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum StackSetDriftStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type StackSetOperationAction string // Enum values for StackSetOperationAction const ( - StackSetOperationActionCreate StackSetOperationAction = "CREATE" - StackSetOperationActionUpdate StackSetOperationAction = "UPDATE" - StackSetOperationActionDelete StackSetOperationAction = "DELETE" + StackSetOperationActionCreate StackSetOperationAction = "CREATE" + StackSetOperationActionUpdate StackSetOperationAction = "UPDATE" + StackSetOperationActionDelete StackSetOperationAction = "DELETE" + StackSetOperationActionDetectDrift StackSetOperationAction = "DETECT_DRIFT" ) func (enum StackSetOperationAction) MarshalValue() (string, error) { @@ -512,3 +668,20 @@ func (enum TemplateStage) MarshalValueBuf(b []byte) ([]byte, error) { b = b[0:0] return append(b, enum...), nil } + +type Visibility string + +// Enum values for Visibility +const ( + VisibilityPublic Visibility = "PUBLIC" + VisibilityPrivate Visibility = "PRIVATE" +) + +func (enum Visibility) MarshalValue() (string, error) { + 
return string(enum), nil +} + +func (enum Visibility) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/cloudformation/api_errors.go b/service/cloudformation/api_errors.go index 9ced8d7bd6c..f2312e9fb76 100644 --- a/service/cloudformation/api_errors.go +++ b/service/cloudformation/api_errors.go @@ -10,6 +10,12 @@ const ( // The resource with the name requested already exists. ErrCodeAlreadyExistsException = "AlreadyExistsException" + // ErrCodeCFNRegistryException for service response error code + // "CFNRegistryException". + // + // An error occurred during a CloudFormation registry operation. + ErrCodeCFNRegistryException = "CFNRegistryException" + // ErrCodeChangeSetNotFoundException for service response error code // "ChangeSetNotFound". // @@ -44,6 +50,13 @@ const ( // The specified operation isn't valid. ErrCodeInvalidOperationException = "InvalidOperationException" + // ErrCodeInvalidStateTransitionException for service response error code + // "InvalidStateTransition". + // + // Error reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // CloudFormation does not return this error to users. + ErrCodeInvalidStateTransitionException = "InvalidStateTransition" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // @@ -78,6 +91,13 @@ const ( // The specified ID refers to an operation that doesn't exist. ErrCodeOperationNotFoundException = "OperationNotFoundException" + // ErrCodeOperationStatusCheckFailedException for service response error code + // "ConditionalCheckFailed". + // + // Error reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // CloudFormation does not return this error to users. 
+ ErrCodeOperationStatusCheckFailedException = "ConditionalCheckFailed" + // ErrCodeStackInstanceNotFoundException for service response error code // "StackInstanceNotFoundException". // @@ -110,4 +130,10 @@ const ( // // A client request token already exists. ErrCodeTokenAlreadyExistsException = "TokenAlreadyExistsException" + + // ErrCodeTypeNotFoundException for service response error code + // "TypeNotFoundException". + // + // The specified type does not exist in the CloudFormation registry. + ErrCodeTypeNotFoundException = "TypeNotFoundException" ) diff --git a/service/cloudformation/api_integ_test.go b/service/cloudformation/api_integ_test.go index 1969917723b..da02286ceea 100644 --- a/service/cloudformation/api_integ_test.go +++ b/service/cloudformation/api_integ_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws/defaults" "github.com/aws/aws-sdk-go-v2/internal/awstesting/integration" "github.com/aws/aws-sdk-go-v2/service/cloudformation" ) @@ -27,7 +28,7 @@ func TestInteg_00_ListStacks(t *testing.T) { params := &cloudformation.ListStacksInput{} req := svc.ListStacksRequest(params) - + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) _, err := req.Send(ctx) if err != nil { t.Errorf("expect no error, got %v", err) @@ -45,7 +46,7 @@ func TestInteg_01_CreateStack(t *testing.T) { } req := svc.CreateStackRequest(params) - + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) _, err := req.Send(ctx) if err == nil { t.Fatalf("expect request to fail") diff --git a/service/cloudformation/api_op_DeregisterType.go b/service/cloudformation/api_op_DeregisterType.go new file mode 100644 index 00000000000..3eb05290e9b --- /dev/null +++ b/service/cloudformation/api_op_DeregisterType.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeregisterTypeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. + VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeregisterTypeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterTypeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeregisterTypeInput"} + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterTypeOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeregisterType = "DeregisterType" + +// DeregisterTypeRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Removes a type or type version from active use in the CloudFormation registry. 
+// If a type or type version is deregistered, it cannot be used in CloudFormation +// operations. +// +// To deregister a type, you must individually deregister all registered versions +// of that type. If a type has only a single registered version, deregistering +// that version results in the type itself being deregistered. +// +// You cannot deregister the default version of a type, unless it is the only +// registered version of that type, in which case the type itself is deregistered +// as well. +// +// // Example sending a request using DeregisterTypeRequest. +// req := client.DeregisterTypeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeregisterType +func (c *Client) DeregisterTypeRequest(input *DeregisterTypeInput) DeregisterTypeRequest { + op := &aws.Operation{ + Name: opDeregisterType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTypeInput{} + } + + req := c.newRequest(op, input, &DeregisterTypeOutput{}) + return DeregisterTypeRequest{Request: req, Input: input, Copy: c.DeregisterTypeRequest} +} + +// DeregisterTypeRequest is the request type for the +// DeregisterType API operation. +type DeregisterTypeRequest struct { + *aws.Request + Input *DeregisterTypeInput + Copy func(*DeregisterTypeInput) DeregisterTypeRequest +} + +// Send marshals and sends the DeregisterType API request. +func (r DeregisterTypeRequest) Send(ctx context.Context) (*DeregisterTypeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeregisterTypeResponse{ + DeregisterTypeOutput: r.Request.Data.(*DeregisterTypeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeregisterTypeResponse is the response type for the +// DeregisterType API operation. 
+type DeregisterTypeResponse struct { + *DeregisterTypeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeregisterType request. +func (r *DeregisterTypeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_DescribeType.go b/service/cloudformation/api_op_DescribeType.go new file mode 100644 index 00000000000..76f4a4a5532 --- /dev/null +++ b/service/cloudformation/api_op_DescribeType.go @@ -0,0 +1,227 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeTypeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. + // + // If you specify a VersionId, DescribeType returns information about that specific + // type version. Otherwise, it returns information about the default type version. + VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeTypeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTypeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTypeInput"} + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTypeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type. + Arn *string `type:"string"` + + // The ID of the default version of the type. The default version is used when + // the type version is not specified. + // + // To set the default version of a type, use SetTypeDefaultVersion . + DefaultVersionId *string `min:"1" type:"string"` + + // The deprecation status of the type. + // + // Valid values include: + // + // * LIVE: The type is registered and can be used in CloudFormation operations, + // dependent on its provisioning behavior and visibility scope. + // + // * DEPRECATED: The type has been deregistered and can no longer be used + // in CloudFormation operations. + DeprecatedStatus DeprecatedStatus `type:"string" enum:"true"` + + // The description of the registered type. + Description *string `min:"1" type:"string"` + + // The URL of a page providing detailed documentation for this type. + DocumentationUrl *string `type:"string"` + + // The Amazon Resource Name (ARN) of the IAM execution role used to register + // the type. If your resource type calls AWS APIs in any of its handlers, you + // must create an IAM execution role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) + // that includes the necessary permissions to call those AWS APIs, and provision + // that execution role in your account. CloudFormation then assumes that execution + // role to provide your resource type with the appropriate credentials. 
+ ExecutionRoleArn *string `min:"1" type:"string"` + + // When the specified type version was registered. + LastUpdated *time.Time `type:"timestamp"` + + // Contains logging configuration information for a type. + LoggingConfig *LoggingConfig `type:"structure"` + + // The provisioning behavior of the type. AWS CloudFormation determines the + // provisioning type during registration, based on the types of handlers in + // the schema handler package submitted. + // + // Valid values include: + // + // * FULLY_MUTABLE: The type includes an update handler to process updates + // to the type during stack update operations. + // + // * IMMUTABLE: The type does not include an update handler, so the type + // cannot be updated and must instead be replaced during stack update operations. + // + // * NON_PROVISIONABLE: The type does not include all of the following handlers, + // and therefore cannot actually be provisioned. create read delete + ProvisioningType ProvisioningType `type:"string" enum:"true"` + + // The schema that defines the type. + // + // For more information on type schemas, see Resource Provider Schema (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-schema.html) + // in the CloudFormation CLI User Guide. + Schema *string `min:"1" type:"string"` + + // The URL of the source code for the type. + SourceUrl *string `type:"string"` + + // When the specified type version was registered. + TimeCreated *time.Time `type:"timestamp"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the registered type. + TypeName *string `min:"10" type:"string"` + + // The scope at which the type is visible and usable in CloudFormation operations. + // + // Valid values include: + // + // * PRIVATE: The type is only visible and usable within the account in which + // it is registered. 
Currently, AWS CloudFormation marks any types you register + // as PRIVATE. + // + // * PUBLIC: The type is publicly visible and usable within any Amazon + // account. + Visibility Visibility `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DescribeTypeOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeType = "DescribeType" + +// DescribeTypeRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns detailed information about a type that has been registered. +// +// If you specify a VersionId, DescribeType returns information about that specific +// type version. Otherwise, it returns information about the default type version. +// +// // Example sending a request using DescribeTypeRequest. +// req := client.DescribeTypeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeType +func (c *Client) DescribeTypeRequest(input *DescribeTypeInput) DescribeTypeRequest { + op := &aws.Operation{ + Name: opDescribeType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTypeInput{} + } + + req := c.newRequest(op, input, &DescribeTypeOutput{}) + return DescribeTypeRequest{Request: req, Input: input, Copy: c.DescribeTypeRequest} +} + +// DescribeTypeRequest is the request type for the +// DescribeType API operation. +type DescribeTypeRequest struct { + *aws.Request + Input *DescribeTypeInput + Copy func(*DescribeTypeInput) DescribeTypeRequest +} + +// Send marshals and sends the DescribeType API request. 
+func (r DescribeTypeRequest) Send(ctx context.Context) (*DescribeTypeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTypeResponse{ + DescribeTypeOutput: r.Request.Data.(*DescribeTypeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTypeResponse is the response type for the +// DescribeType API operation. +type DescribeTypeResponse struct { + *DescribeTypeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeType request. +func (r *DescribeTypeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_DescribeTypeRegistration.go b/service/cloudformation/api_op_DescribeTypeRegistration.go new file mode 100644 index 00000000000..97b89ebb014 --- /dev/null +++ b/service/cloudformation/api_op_DescribeTypeRegistration.go @@ -0,0 +1,148 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeTypeRegistrationInput struct { + _ struct{} `type:"structure"` + + // The identifier for this registration request. + // + // This registration token is generated by CloudFormation when you initiate + // a registration request using RegisterType . + // + // RegistrationToken is a required field + RegistrationToken *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTypeRegistrationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTypeRegistrationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTypeRegistrationInput"} + + if s.RegistrationToken == nil { + invalidParams.Add(aws.NewErrParamRequired("RegistrationToken")) + } + if s.RegistrationToken != nil && len(*s.RegistrationToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RegistrationToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTypeRegistrationOutput struct { + _ struct{} `type:"structure"` + + // The description of the type registration request. + Description *string `min:"1" type:"string"` + + // The current status of the type registration request. + ProgressStatus RegistrationStatus `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the type being registered. + // + // For registration requests with a ProgressStatus of other than COMPLETE, this + // will be null. + TypeArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of this specific version of the type being + // registered. + // + // For registration requests with a ProgressStatus of other than COMPLETE, this + // will be null. + TypeVersionArn *string `type:"string"` +} + +// String returns the string representation +func (s DescribeTypeRegistrationOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeTypeRegistration = "DescribeTypeRegistration" + +// DescribeTypeRegistrationRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns information about a type's registration, including its current status +// and type and version identifiers. +// +// When you initiate a registration request using RegisterType , you can then +// use DescribeTypeRegistration to monitor the progress of that registration +// request. +// +// Once the registration request has completed, use DescribeType to return detailed +// information about a type. 
+// +// // Example sending a request using DescribeTypeRegistrationRequest. +// req := client.DescribeTypeRegistrationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeTypeRegistration +func (c *Client) DescribeTypeRegistrationRequest(input *DescribeTypeRegistrationInput) DescribeTypeRegistrationRequest { + op := &aws.Operation{ + Name: opDescribeTypeRegistration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTypeRegistrationInput{} + } + + req := c.newRequest(op, input, &DescribeTypeRegistrationOutput{}) + return DescribeTypeRegistrationRequest{Request: req, Input: input, Copy: c.DescribeTypeRegistrationRequest} +} + +// DescribeTypeRegistrationRequest is the request type for the +// DescribeTypeRegistration API operation. +type DescribeTypeRegistrationRequest struct { + *aws.Request + Input *DescribeTypeRegistrationInput + Copy func(*DescribeTypeRegistrationInput) DescribeTypeRegistrationRequest +} + +// Send marshals and sends the DescribeTypeRegistration API request. +func (r DescribeTypeRegistrationRequest) Send(ctx context.Context) (*DescribeTypeRegistrationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTypeRegistrationResponse{ + DescribeTypeRegistrationOutput: r.Request.Data.(*DescribeTypeRegistrationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTypeRegistrationResponse is the response type for the +// DescribeTypeRegistration API operation. +type DescribeTypeRegistrationResponse struct { + *DescribeTypeRegistrationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeTypeRegistration request. 
+func (r *DescribeTypeRegistrationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_DetectStackSetDrift.go b/service/cloudformation/api_op_DetectStackSetDrift.go new file mode 100644 index 00000000000..4c791495107 --- /dev/null +++ b/service/cloudformation/api_op_DetectStackSetDrift.go @@ -0,0 +1,171 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DetectStackSetDriftInput struct { + _ struct{} `type:"structure"` + + // The ID of the stack set operation. + OperationId *string `min:"1" type:"string" idempotencyToken:"true"` + + // The user-specified preferences for how AWS CloudFormation performs a stack + // set operation. + // + // For more information on maximum concurrent accounts and failure tolerance, + // see Stack set operation options (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-ops-options). + OperationPreferences *StackSetOperationPreferences `type:"structure"` + + // The name of the stack set on which to perform the drift detection operation. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetectStackSetDriftInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DetectStackSetDriftInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DetectStackSetDriftInput"}
+ if s.OperationId != nil && len(*s.OperationId) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("OperationId", 1))
+ }
+
+ if s.StackSetName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("StackSetName"))
+ }
+ if s.OperationPreferences != nil {
+ if err := s.OperationPreferences.Validate(); err != nil {
+ invalidParams.AddNested("OperationPreferences", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DetectStackSetDriftOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the drift detection stack set operation.
+ //
+ // You can use this operation id with DescribeStackSetOperation to monitor the
+ // progress of the drift detection operation.
+ OperationId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DetectStackSetDriftOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opDetectStackSetDrift = "DetectStackSetDrift"
+
+// DetectStackSetDriftRequest returns a request value for making API operation for
+// AWS CloudFormation.
+//
+// Detect drift on a stack set. When CloudFormation performs drift detection
+// on a stack set, it performs drift detection on the stack associated with
+// each stack instance in the stack set. For more information, see How CloudFormation
+// Performs Drift Detection on a Stack Set (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html).
+//
+// DetectStackSetDrift returns the OperationId of the stack set drift detection
+// operation. Use this operation id with DescribeStackSetOperation to monitor
+// the progress of the drift detection operation. The drift detection operation
+// may take some time, depending on the number of stack instances included in
+// the stack set, as well as the number of resources included in each stack.
+//
+// Once the operation has completed, use the following actions to return drift
+// information:
+//
+// * Use DescribeStackSet to return detailed information about the stack
+// set, including detailed information about the last completed drift operation
+// performed on the stack set. (Information about drift operations that are
+// in progress is not included.)
+//
+// * Use ListStackInstances to return a list of stack instances belonging
+// to the stack set, including the drift status and last drift time checked
+// of each instance.
+//
+// * Use DescribeStackInstance to return detailed information about a specific
+// stack instance, including its drift status and last drift time checked.
+//
+// For more information on performing a drift detection operation on a stack
+// set, see Detecting Unmanaged Changes in Stack Sets (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html).
+//
+// You can only run a single drift detection operation on a given stack set
+// at one time.
+//
+// To stop a drift detection stack set operation, use StopStackSetOperation .
+//
+// // Example sending a request using DetectStackSetDriftRequest.
+// req := client.DetectStackSetDriftRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DetectStackSetDrift
+func (c *Client) DetectStackSetDriftRequest(input *DetectStackSetDriftInput) DetectStackSetDriftRequest {
+ op := &aws.Operation{
+ Name: opDetectStackSetDrift,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DetectStackSetDriftInput{}
+ }
+
+ req := c.newRequest(op, input, &DetectStackSetDriftOutput{})
+ return DetectStackSetDriftRequest{Request: req, Input: input, Copy: c.DetectStackSetDriftRequest}
+}
+
+// DetectStackSetDriftRequest is the request type for the
+// DetectStackSetDrift API operation.
+type DetectStackSetDriftRequest struct { + *aws.Request + Input *DetectStackSetDriftInput + Copy func(*DetectStackSetDriftInput) DetectStackSetDriftRequest +} + +// Send marshals and sends the DetectStackSetDrift API request. +func (r DetectStackSetDriftRequest) Send(ctx context.Context) (*DetectStackSetDriftResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DetectStackSetDriftResponse{ + DetectStackSetDriftOutput: r.Request.Data.(*DetectStackSetDriftOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DetectStackSetDriftResponse is the response type for the +// DetectStackSetDrift API operation. +type DetectStackSetDriftResponse struct { + *DetectStackSetDriftOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DetectStackSetDrift request. +func (r *DetectStackSetDriftResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_ListTypeRegistrations.go b/service/cloudformation/api_op_ListTypeRegistrations.go new file mode 100644 index 00000000000..628e0c8a71d --- /dev/null +++ b/service/cloudformation/api_op_ListTypeRegistrations.go @@ -0,0 +1,211 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListTypeRegistrationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned with a single call. If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. 
+ MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call this action again and assign that token to + // the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The current status of the type registration request. + RegistrationStatusFilter RegistrationStatus `type:"string" enum:"true"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeArn *string `type:"string"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` +} + +// String returns the string representation +func (s ListTypeRegistrationsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTypeRegistrationsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTypeRegistrationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTypeRegistrationsOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. 
To retrieve the next set of results, call this action again + // and assign that token to the request object's NextToken parameter. If the + // request returns all results, NextToken is set to null. + NextToken *string `min:"1" type:"string"` + + // A list of type registration tokens. + // + // Use DescribeTypeRegistration to return detailed information about a type + // registration request. + RegistrationTokenList []string `type:"list"` +} + +// String returns the string representation +func (s ListTypeRegistrationsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListTypeRegistrations = "ListTypeRegistrations" + +// ListTypeRegistrationsRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns a list of registration tokens for the specified type. +// +// // Example sending a request using ListTypeRegistrationsRequest. +// req := client.ListTypeRegistrationsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListTypeRegistrations +func (c *Client) ListTypeRegistrationsRequest(input *ListTypeRegistrationsInput) ListTypeRegistrationsRequest { + op := &aws.Operation{ + Name: opListTypeRegistrations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTypeRegistrationsInput{} + } + + req := c.newRequest(op, input, &ListTypeRegistrationsOutput{}) + return ListTypeRegistrationsRequest{Request: req, Input: input, Copy: c.ListTypeRegistrationsRequest} +} + +// ListTypeRegistrationsRequest is the request type for the +// ListTypeRegistrations API operation. 
+type ListTypeRegistrationsRequest struct { + *aws.Request + Input *ListTypeRegistrationsInput + Copy func(*ListTypeRegistrationsInput) ListTypeRegistrationsRequest +} + +// Send marshals and sends the ListTypeRegistrations API request. +func (r ListTypeRegistrationsRequest) Send(ctx context.Context) (*ListTypeRegistrationsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTypeRegistrationsResponse{ + ListTypeRegistrationsOutput: r.Request.Data.(*ListTypeRegistrationsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTypeRegistrationsRequestPaginator returns a paginator for ListTypeRegistrations. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTypeRegistrationsRequest(input) +// p := cloudformation.NewListTypeRegistrationsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTypeRegistrationsPaginator(req ListTypeRegistrationsRequest) ListTypeRegistrationsPaginator { + return ListTypeRegistrationsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTypeRegistrationsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTypeRegistrationsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
+type ListTypeRegistrationsPaginator struct { + aws.Pager +} + +func (p *ListTypeRegistrationsPaginator) CurrentPage() *ListTypeRegistrationsOutput { + return p.Pager.CurrentPage().(*ListTypeRegistrationsOutput) +} + +// ListTypeRegistrationsResponse is the response type for the +// ListTypeRegistrations API operation. +type ListTypeRegistrationsResponse struct { + *ListTypeRegistrationsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTypeRegistrations request. +func (r *ListTypeRegistrationsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_ListTypeVersions.go b/service/cloudformation/api_op_ListTypeVersions.go new file mode 100644 index 00000000000..7559d037af2 --- /dev/null +++ b/service/cloudformation/api_op_ListTypeVersions.go @@ -0,0 +1,219 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListTypeVersionsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type for which you want version summary + // information. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The deprecation status of the type versions that you want to get summary + // information about. + // + // Valid values include: + // + // * LIVE: The type version is registered and can be used in CloudFormation + // operations, dependent on its provisioning behavior and visibility scope. + // + // * DEPRECATED: The type version has been deregistered and can no longer + // be used in CloudFormation operations. + DeprecatedStatus DeprecatedStatus `type:"string" enum:"true"` + + // The maximum number of results to be returned with a single call. 
If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call this action again and assign that token to + // the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The kind of the type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type for which you want version summary information. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` +} + +// String returns the string representation +func (s ListTypeVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTypeVersionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTypeVersionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTypeVersionsOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. 
To retrieve the next set of results, call this action again + // and assign that token to the request object's NextToken parameter. If the + // request returns all results, NextToken is set to null. + NextToken *string `min:"1" type:"string"` + + // A list of TypeVersionSummary structures that contain information about the + // specified type's versions. + TypeVersionSummaries []TypeVersionSummary `type:"list"` +} + +// String returns the string representation +func (s ListTypeVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListTypeVersions = "ListTypeVersions" + +// ListTypeVersionsRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns summary information about the versions of a type. +// +// // Example sending a request using ListTypeVersionsRequest. +// req := client.ListTypeVersionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListTypeVersions +func (c *Client) ListTypeVersionsRequest(input *ListTypeVersionsInput) ListTypeVersionsRequest { + op := &aws.Operation{ + Name: opListTypeVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTypeVersionsInput{} + } + + req := c.newRequest(op, input, &ListTypeVersionsOutput{}) + return ListTypeVersionsRequest{Request: req, Input: input, Copy: c.ListTypeVersionsRequest} +} + +// ListTypeVersionsRequest is the request type for the +// ListTypeVersions API operation. +type ListTypeVersionsRequest struct { + *aws.Request + Input *ListTypeVersionsInput + Copy func(*ListTypeVersionsInput) ListTypeVersionsRequest +} + +// Send marshals and sends the ListTypeVersions API request. 
+func (r ListTypeVersionsRequest) Send(ctx context.Context) (*ListTypeVersionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTypeVersionsResponse{ + ListTypeVersionsOutput: r.Request.Data.(*ListTypeVersionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTypeVersionsRequestPaginator returns a paginator for ListTypeVersions. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTypeVersionsRequest(input) +// p := cloudformation.NewListTypeVersionsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTypeVersionsPaginator(req ListTypeVersionsRequest) ListTypeVersionsPaginator { + return ListTypeVersionsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTypeVersionsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTypeVersionsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTypeVersionsPaginator struct { + aws.Pager +} + +func (p *ListTypeVersionsPaginator) CurrentPage() *ListTypeVersionsOutput { + return p.Pager.CurrentPage().(*ListTypeVersionsOutput) +} + +// ListTypeVersionsResponse is the response type for the +// ListTypeVersions API operation. 
+type ListTypeVersionsResponse struct { + *ListTypeVersionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTypeVersions request. +func (r *ListTypeVersionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_ListTypes.go b/service/cloudformation/api_op_ListTypes.go new file mode 100644 index 00000000000..76b7b7d6deb --- /dev/null +++ b/service/cloudformation/api_op_ListTypes.go @@ -0,0 +1,227 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListTypesInput struct { + _ struct{} `type:"structure"` + + // The deprecation status of the types that you want to get summary information + // about. + // + // Valid values include: + // + // * LIVE: The type is registered for use in CloudFormation operations. + // + // * DEPRECATED: The type has been deregistered and can no longer be used + // in CloudFormation operations. + DeprecatedStatus DeprecatedStatus `type:"string" enum:"true"` + + // The maximum number of results to be returned with a single call. If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call this action again and assign that token to + // the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. 
+ NextToken *string `min:"1" type:"string"`
+
+ // The provisioning behavior of the type. AWS CloudFormation determines the
+ // provisioning type during registration, based on the types of handlers in
+ // the schema handler package submitted.
+ //
+ // Valid values include:
+ //
+ // * FULLY_MUTABLE: The type includes an update handler to process updates
+ // to the type during stack update operations.
+ //
+ // * IMMUTABLE: The type does not include an update handler, so the type
+ // cannot be updated and must instead be replaced during stack update operations.
+ //
+ // * NON_PROVISIONABLE: The type does not include create, read, and delete
+ // handlers, and therefore cannot actually be provisioned.
+ ProvisioningType ProvisioningType `type:"string" enum:"true"`
+
+ // The scope at which the type is visible and usable in CloudFormation operations.
+ //
+ // Valid values include:
+ //
+ // * PRIVATE: The type is only visible and usable within the account in which
+ // it is registered. Currently, AWS CloudFormation marks any types you create
+ // as PRIVATE.
+ //
+ // * PUBLIC: The type is publicly visible and usable within any Amazon
+ // account.
+ Visibility Visibility `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s ListTypesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTypesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTypesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTypesOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. To retrieve the next set of results, call this action again + // and assign that token to the request object's NextToken parameter. If the + // request returns all results, NextToken is set to null. + NextToken *string `min:"1" type:"string"` + + // A list of TypeSummary structures that contain information about the specified + // types. + TypeSummaries []TypeSummary `type:"list"` +} + +// String returns the string representation +func (s ListTypesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListTypes = "ListTypes" + +// ListTypesRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns summary information about types that have been registered with CloudFormation. +// +// // Example sending a request using ListTypesRequest. 
+// req := client.ListTypesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListTypes +func (c *Client) ListTypesRequest(input *ListTypesInput) ListTypesRequest { + op := &aws.Operation{ + Name: opListTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTypesInput{} + } + + req := c.newRequest(op, input, &ListTypesOutput{}) + return ListTypesRequest{Request: req, Input: input, Copy: c.ListTypesRequest} +} + +// ListTypesRequest is the request type for the +// ListTypes API operation. +type ListTypesRequest struct { + *aws.Request + Input *ListTypesInput + Copy func(*ListTypesInput) ListTypesRequest +} + +// Send marshals and sends the ListTypes API request. +func (r ListTypesRequest) Send(ctx context.Context) (*ListTypesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTypesResponse{ + ListTypesOutput: r.Request.Data.(*ListTypesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTypesRequestPaginator returns a paginator for ListTypes. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListTypesRequest(input) +// p := cloudformation.NewListTypesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTypesPaginator(req ListTypesRequest) ListTypesPaginator { + return ListTypesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTypesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTypesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTypesPaginator struct { + aws.Pager +} + +func (p *ListTypesPaginator) CurrentPage() *ListTypesOutput { + return p.Pager.CurrentPage().(*ListTypesOutput) +} + +// ListTypesResponse is the response type for the +// ListTypes API operation. +type ListTypesResponse struct { + *ListTypesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTypes request. +func (r *ListTypesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_RecordHandlerProgress.go b/service/cloudformation/api_op_RecordHandlerProgress.go new file mode 100644 index 00000000000..175310ea9aa --- /dev/null +++ b/service/cloudformation/api_op_RecordHandlerProgress.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type RecordHandlerProgressInput struct { + _ struct{} `type:"structure"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). 
+ // + // BearerToken is a required field + BearerToken *string `min:"1" type:"string" required:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + ClientRequestToken *string `min:"1" type:"string"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + CurrentOperationStatus OperationStatus `type:"string" enum:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + ErrorCode HandlerErrorCode `type:"string" enum:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // + // OperationStatus is a required field + OperationStatus OperationStatus `type:"string" required:"true" enum:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + ResourceModel *string `min:"1" type:"string"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + StatusMessage *string `type:"string"` +} + +// String returns the string representation +func (s RecordHandlerProgressInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RecordHandlerProgressInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RecordHandlerProgressInput"} + + if s.BearerToken == nil { + invalidParams.Add(aws.NewErrParamRequired("BearerToken")) + } + if s.BearerToken != nil && len(*s.BearerToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("BearerToken", 1)) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 1)) + } + if len(s.OperationStatus) == 0 { + invalidParams.Add(aws.NewErrParamRequired("OperationStatus")) + } + if s.ResourceModel != nil && len(*s.ResourceModel) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceModel", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RecordHandlerProgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RecordHandlerProgressOutput) String() string { + return awsutil.Prettify(s) +} + +const opRecordHandlerProgress = "RecordHandlerProgress" + +// RecordHandlerProgressRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Reports progress of a resource handler to CloudFormation. +// +// Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). +// Do not use this API in your code. +// +// // Example sending a request using RecordHandlerProgressRequest. 
+// req := client.RecordHandlerProgressRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/RecordHandlerProgress +func (c *Client) RecordHandlerProgressRequest(input *RecordHandlerProgressInput) RecordHandlerProgressRequest { + op := &aws.Operation{ + Name: opRecordHandlerProgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RecordHandlerProgressInput{} + } + + req := c.newRequest(op, input, &RecordHandlerProgressOutput{}) + return RecordHandlerProgressRequest{Request: req, Input: input, Copy: c.RecordHandlerProgressRequest} +} + +// RecordHandlerProgressRequest is the request type for the +// RecordHandlerProgress API operation. +type RecordHandlerProgressRequest struct { + *aws.Request + Input *RecordHandlerProgressInput + Copy func(*RecordHandlerProgressInput) RecordHandlerProgressRequest +} + +// Send marshals and sends the RecordHandlerProgress API request. +func (r RecordHandlerProgressRequest) Send(ctx context.Context) (*RecordHandlerProgressResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RecordHandlerProgressResponse{ + RecordHandlerProgressOutput: r.Request.Data.(*RecordHandlerProgressOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RecordHandlerProgressResponse is the response type for the +// RecordHandlerProgress API operation. +type RecordHandlerProgressResponse struct { + *RecordHandlerProgressOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RecordHandlerProgress request. 
+func (r *RecordHandlerProgressResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/service/cloudformation/api_op_RegisterType.go b/service/cloudformation/api_op_RegisterType.go
new file mode 100644
index 00000000000..32a7356b78a
--- /dev/null
+++ b/service/cloudformation/api_op_RegisterType.go
@@ -0,0 +1,208 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package cloudformation
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+type RegisterTypeInput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier that acts as an idempotency key for this registration
+	// request. Specifying a client request token prevents CloudFormation from generating
+	// more than one version of a type from the same registration request, even
+	// if the request is submitted multiple times.
+	ClientRequestToken *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the IAM execution role to use to register
+	// the type. If your resource type calls AWS APIs in any of its handlers, you
+	// must create an IAM execution role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
+	// that includes the necessary permissions to call those AWS APIs, and provision
+	// that execution role in your account. CloudFormation then assumes that execution
+	// role to provide your resource type with the appropriate credentials.
+	ExecutionRoleArn *string `min:"1" type:"string"`
+
+	// Specifies logging configuration information for a type.
+	LoggingConfig *LoggingConfig `type:"structure"`
+
+	// A URL to the S3 bucket containing the schema handler package that contains
+	// the schema, event handlers, and associated files for the type you want to
+	// register.
+ // + // For information on generating a schema handler package for the type you want + // to register, see submit (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) + // in the CloudFormation CLI User Guide. + // + // SchemaHandlerPackage is a required field + SchemaHandlerPackage *string `min:"1" type:"string" required:"true"` + + // The kind of type. + // + // Currently, the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type being registered. + // + // We recommend that type names adhere to the following pattern: company_or_organization::service::type. + // + // The following organization namespaces are reserved and cannot be used in + // your resource type names: + // + // * Alexa + // + // * AMZN + // + // * Amazon + // + // * AWS + // + // * Custom + // + // * Dev + // + // TypeName is a required field + TypeName *string `min:"10" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterTypeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterTypeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RegisterTypeInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ExecutionRoleArn", 1)) + } + + if s.SchemaHandlerPackage == nil { + invalidParams.Add(aws.NewErrParamRequired("SchemaHandlerPackage")) + } + if s.SchemaHandlerPackage != nil && len(*s.SchemaHandlerPackage) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SchemaHandlerPackage", 1)) + } + + if s.TypeName == nil { + invalidParams.Add(aws.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.LoggingConfig != nil { + if err := s.LoggingConfig.Validate(); err != nil { + invalidParams.AddNested("LoggingConfig", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterTypeOutput struct { + _ struct{} `type:"structure"` + + // The identifier for this registration request. + // + // Use this registration token when calling DescribeTypeRegistration , which + // returns information about the status and IDs of the type registration. + RegistrationToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RegisterTypeOutput) String() string { + return awsutil.Prettify(s) +} + +const opRegisterType = "RegisterType" + +// RegisterTypeRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Registers a type with the CloudFormation service. 
Registering a type makes
+// it available for use in CloudFormation templates in your AWS account, and
+// includes:
+//
+//    * Validating the resource schema
+//
+//    * Determining which handlers have been specified for the resource
+//
+//    * Making the resource type available for use in your account
+//
+// For more information on how to develop types and ready them for registration,
+// see Creating Resource Providers (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-types.html)
+// in the CloudFormation CLI User Guide.
+//
+// Once you have initiated a registration request using RegisterType , you can
+// use DescribeTypeRegistration to monitor the progress of the registration
+// request.
+//
+//    // Example sending a request using RegisterTypeRequest.
+//    req := client.RegisterTypeRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/RegisterType
+func (c *Client) RegisterTypeRequest(input *RegisterTypeInput) RegisterTypeRequest {
+	op := &aws.Operation{
+		Name:       opRegisterType,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterTypeInput{}
+	}
+
+	req := c.newRequest(op, input, &RegisterTypeOutput{})
+	return RegisterTypeRequest{Request: req, Input: input, Copy: c.RegisterTypeRequest}
+}
+
+// RegisterTypeRequest is the request type for the
+// RegisterType API operation.
+type RegisterTypeRequest struct {
+	*aws.Request
+	Input *RegisterTypeInput
+	Copy  func(*RegisterTypeInput) RegisterTypeRequest
+}
+
+// Send marshals and sends the RegisterType API request.
+func (r RegisterTypeRequest) Send(ctx context.Context) (*RegisterTypeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RegisterTypeResponse{ + RegisterTypeOutput: r.Request.Data.(*RegisterTypeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RegisterTypeResponse is the response type for the +// RegisterType API operation. +type RegisterTypeResponse struct { + *RegisterTypeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RegisterType request. +func (r *RegisterTypeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_SetTypeDefaultVersion.go b/service/cloudformation/api_op_SetTypeDefaultVersion.go new file mode 100644 index 00000000000..cd4dde3a1d7 --- /dev/null +++ b/service/cloudformation/api_op_SetTypeDefaultVersion.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type SetTypeDefaultVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type for which you want version summary + // information. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The kind of type. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. 
+ VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SetTypeDefaultVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetTypeDefaultVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SetTypeDefaultVersionInput"} + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetTypeDefaultVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTypeDefaultVersionOutput) String() string { + return awsutil.Prettify(s) +} + +const opSetTypeDefaultVersion = "SetTypeDefaultVersion" + +// SetTypeDefaultVersionRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Specify the default version of a type. The default version of a type will +// be used in CloudFormation operations. +// +// // Example sending a request using SetTypeDefaultVersionRequest. 
+// req := client.SetTypeDefaultVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/SetTypeDefaultVersion +func (c *Client) SetTypeDefaultVersionRequest(input *SetTypeDefaultVersionInput) SetTypeDefaultVersionRequest { + op := &aws.Operation{ + Name: opSetTypeDefaultVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTypeDefaultVersionInput{} + } + + req := c.newRequest(op, input, &SetTypeDefaultVersionOutput{}) + return SetTypeDefaultVersionRequest{Request: req, Input: input, Copy: c.SetTypeDefaultVersionRequest} +} + +// SetTypeDefaultVersionRequest is the request type for the +// SetTypeDefaultVersion API operation. +type SetTypeDefaultVersionRequest struct { + *aws.Request + Input *SetTypeDefaultVersionInput + Copy func(*SetTypeDefaultVersionInput) SetTypeDefaultVersionRequest +} + +// Send marshals and sends the SetTypeDefaultVersion API request. +func (r SetTypeDefaultVersionRequest) Send(ctx context.Context) (*SetTypeDefaultVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &SetTypeDefaultVersionResponse{ + SetTypeDefaultVersionOutput: r.Request.Data.(*SetTypeDefaultVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// SetTypeDefaultVersionResponse is the response type for the +// SetTypeDefaultVersion API operation. +type SetTypeDefaultVersionResponse struct { + *SetTypeDefaultVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// SetTypeDefaultVersion request. 
+func (r *SetTypeDefaultVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_types.go b/service/cloudformation/api_types.go index ea9f28a6ccb..cbc1a627640 100644 --- a/service/cloudformation/api_types.go +++ b/service/cloudformation/api_types.go @@ -178,6 +178,52 @@ func (s Export) String() string { return awsutil.Prettify(s) } +// Contains logging configuration information for a type. +type LoggingConfig struct { + _ struct{} `type:"structure"` + + // The Amazon CloudWatch log group to which CloudFormation sends error logging + // information when invoking the type's handlers. + // + // LogGroupName is a required field + LogGroupName *string `min:"1" type:"string" required:"true"` + + // The ARN of the role that CloudFormation should assume when sending log entries + // to CloudWatch logs. + // + // LogRoleArn is a required field + LogRoleArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingConfig) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingConfig) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "LoggingConfig"} + + if s.LogGroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LogGroupName", 1)) + } + + if s.LogRoleArn == nil { + invalidParams.Add(aws.NewErrParamRequired("LogRoleArn")) + } + if s.LogRoleArn != nil && len(*s.LogRoleArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LogRoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The Output data type. type Output struct { _ struct{} `type:"structure"` @@ -936,6 +982,28 @@ type StackInstance struct { // The name of the AWS account that the stack instance is associated with. 
Account *string `type:"string"` + // Status of the stack instance's actual configuration compared to the expected + // template and parameter configuration of the stack set to which it belongs. + // + // * DRIFTED: The stack differs from the expected template and parameter + // configuration of the stack set to which it belongs. A stack instance is + // considered to have drifted if one or more of the resources in the associated + // stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked if the stack instance + // differs from its expected stack set configuration. + // + // * IN_SYNC: The stack instance's actual configuration matches its expected + // stack set configuration. + // + // * UNKNOWN: This value is reserved for future use. + DriftStatus StackDriftStatus `type:"string" enum:"true"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack instance. This value will be NULL for any stack instance on + // which drift detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + // A list of parameters from the stack set template whose values have been overridden // in this stack instance. ParameterOverrides []Parameter `type:"list"` @@ -984,6 +1052,28 @@ type StackInstanceSummary struct { // The name of the AWS account that the stack instance is associated with. Account *string `type:"string"` + // Status of the stack instance's actual configuration compared to the expected + // template and parameter configuration of the stack set to which it belongs. + // + // * DRIFTED: The stack differs from the expected template and parameter + // configuration of the stack set to which it belongs. A stack instance is + // considered to have drifted if one or more of the resources in the associated + // stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked if the stack instance + // differs from its expected stack set configuration. 
+ // + // * IN_SYNC: The stack instance's actual configuration matches its expected + // stack set configuration. + // + // * UNKNOWN: This value is reserved for future use. + DriftStatus StackDriftStatus `type:"string" enum:"true"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack instance. This value will be NULL for any stack instance on + // which drift detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + // The name of the AWS region that the stack instance is associated with. Region *string `type:"string"` @@ -1374,6 +1464,13 @@ type StackSet struct { // The Amazon Resource Number (ARN) of the stack set. StackSetARN *string `type:"string"` + // Detailed information about the drift status of the stack set. + // + // For stack sets, contains information about the last completed drift operation + // performed on the stack set. Information about drift operations currently + // in progress is not included. + StackSetDriftDetectionDetails *StackSetDriftDetectionDetails `type:"structure"` + // The ID of the stack set. StackSetId *string `type:"string"` @@ -1397,6 +1494,92 @@ func (s StackSet) String() string { return awsutil.Prettify(s) } +// Detailed information about the drift status of the stack set. +// +// For stack sets, contains information about the last completed drift operation +// performed on the stack set. Information about drift operations in-progress +// is not included. +// +// For stack set operations, includes information about drift operations currently +// being performed on the stack set. +// +// For more information, see Detecting Unmanaged Changes in Stack Sets (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html) +// in the AWS CloudFormation User Guide. +type StackSetDriftDetectionDetails struct { + _ struct{} `type:"structure"` + + // The status of the stack set drift detection operation. 
+ // + // * COMPLETED: The drift detection operation completed without failing on + // any stack instances. + // + // * FAILED: The drift detection operation exceeded the specified failure + // tolerance. + // + // * PARTIAL_SUCCESS: The drift detection operation completed without exceeding + // the failure tolerance for the operation. + // + // * IN_PROGRESS: The drift detection operation is currently being performed. + // + // * STOPPED: The user has cancelled the drift detection operation. + DriftDetectionStatus StackSetDriftDetectionStatus `type:"string" enum:"true"` + + // Status of the stack set's actual configuration compared to its expected template + // and parameter configuration. A stack set is considered to have drifted if + // one or more of its stack instances have drifted from their expected template + // and parameter configuration. + // + // * DRIFTED: One or more of the stack instances belonging to the stack set + // stack differs from the expected template and parameter configuration. + // A stack instance is considered to have drifted if one or more of the resources + // in the associated stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift. + // + // * IN_SYNC: All of the stack instances belonging to the stack set stack + // match from the expected template and parameter configuration. + DriftStatus StackSetDriftStatus `type:"string" enum:"true"` + + // The number of stack instances that have drifted from the expected template + // and parameter configuration of the stack set. A stack instance is considered + // to have drifted if one or more of the resources in the associated stack do + // not match their expected configuration. + DriftedStackInstancesCount *int64 `type:"integer"` + + // The number of stack instances for which the drift detection operation failed. 
+ FailedStackInstancesCount *int64 `type:"integer"` + + // The number of stack instances that are currently being checked for drift. + InProgressStackInstancesCount *int64 `type:"integer"` + + // The number of stack instances which match the expected template and parameter + // configuration of the stack set. + InSyncStackInstancesCount *int64 `type:"integer"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack set. This value will be NULL for any stack set on which drift + // detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + + // The total number of stack instances belonging to this stack set. + // + // The total number of stack instances is equal to the total of: + // + // * Stack instances that match the stack set configuration. + // + // * Stack instances that have drifted from the stack set configuration. + // + // * Stack instances where the drift detection operation has failed. + // + // * Stack instances currently being checked for drift. + TotalStackInstancesCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s StackSetDriftDetectionDetails) String() string { + return awsutil.Prettify(s) +} + // The structure that contains information about a stack set operation. type StackSetOperation struct { _ struct{} `type:"structure"` @@ -1446,6 +1629,17 @@ type StackSetOperation struct { // stack to a new stack set. RetainStacks *bool `type:"boolean"` + // Detailed information about the drift status of the stack set. This includes + // information about drift operations currently being performed on the stack + // set. + // + // this information will only be present for stack set operations whose Action + // type is DETECT_DRIFT. + // + // For more information, see Detecting Unmanaged Changes in Stack Sets (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html) + // in the AWS CloudFormation User Guide. 
+ StackSetDriftDetectionDetails *StackSetDriftDetectionDetails `type:"structure"` + // The ID of the stack set. StackSetId *string `type:"string"` @@ -1662,6 +1856,29 @@ type StackSetSummary struct { // or updated. Description *string `min:"1" type:"string"` + // Status of the stack set's actual configuration compared to its expected template + // and parameter configuration. A stack set is considered to have drifted if + // one or more of its stack instances have drifted from their expected template + // and parameter configuration. + // + // * DRIFTED: One or more of the stack instances belonging to the stack set + // stack differs from the expected template and parameter configuration. + // A stack instance is considered to have drifted if one or more of the resources + // in the associated stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift. + // + // * IN_SYNC: All of the stack instances belonging to the stack set stack + // match from the expected template and parameter configuration. + // + // * UNKNOWN: This value is reserved for future use. + DriftStatus StackDriftStatus `type:"string" enum:"true"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack set. This value will be NULL for any stack set on which drift + // detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + // The ID of the stack set. StackSetId *string `type:"string"` @@ -1809,3 +2026,65 @@ type TemplateParameter struct { func (s TemplateParameter) String() string { return awsutil.Prettify(s) } + +// Contains summary information about the specified CloudFormation type. +type TypeSummary struct { + _ struct{} `type:"structure"` + + // The ID of the default version of the type. The default version is used when + // the type version is not specified. + // + // To set the default version of a type, use SetTypeDefaultVersion . 
+ DefaultVersionId *string `min:"1" type:"string"` + + // The description of the type. + Description *string `min:"1" type:"string"` + + // When the current default version of the type was registered. + LastUpdated *time.Time `type:"timestamp"` + + // The kind of type. + Type RegistryType `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the type. + TypeArn *string `type:"string"` + + // The name of the type. + TypeName *string `min:"10" type:"string"` +} + +// String returns the string representation +func (s TypeSummary) String() string { + return awsutil.Prettify(s) +} + +// Contains summary information about a specific version of a CloudFormation +// type. +type TypeVersionSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type version. + Arn *string `type:"string"` + + // The description of the type version. + Description *string `min:"1" type:"string"` + + // When the version was registered. + TimeCreated *time.Time `type:"timestamp"` + + // The kind of type. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. 
+ VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TypeVersionSummary) String() string { + return awsutil.Prettify(s) +} diff --git a/service/cloudformation/api_waiters.go b/service/cloudformation/api_waiters.go index eac8e08f2f3..e705f3b236f 100644 --- a/service/cloudformation/api_waiters.go +++ b/service/cloudformation/api_waiters.go @@ -372,3 +372,47 @@ func (c *Client) WaitUntilStackUpdateComplete(ctx context.Context, input *Descri return w.Wait(ctx) } + +// WaitUntilTypeRegistrationComplete uses the AWS CloudFormation API operation +// DescribeTypeRegistration to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilTypeRegistrationComplete(ctx context.Context, input *DescribeTypeRegistrationInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilTypeRegistrationComplete", + MaxAttempts: 120, + Delay: aws.ConstantWaiterDelay(30 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "ProgressStatus", + Expected: "COMPLETE", + }, + { + State: aws.FailureWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "ProgressStatus", + Expected: "FAILED", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *DescribeTypeRegistrationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.DescribeTypeRegistrationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.Wait(ctx) +} diff --git a/service/cloudformation/cloudformationiface/interface.go b/service/cloudformation/cloudformationiface/interface.go index b5245a660dd..08e99255e2d 100644 --- a/service/cloudformation/cloudformationiface/interface.go +++ b/service/cloudformation/cloudformationiface/interface.go @@ -84,6 +84,8 @@ type ClientAPI interface { DeleteStackSetRequest(*cloudformation.DeleteStackSetInput) cloudformation.DeleteStackSetRequest + DeregisterTypeRequest(*cloudformation.DeregisterTypeInput) cloudformation.DeregisterTypeRequest + DescribeAccountLimitsRequest(*cloudformation.DescribeAccountLimitsInput) cloudformation.DescribeAccountLimitsRequest DescribeChangeSetRequest(*cloudformation.DescribeChangeSetInput) cloudformation.DescribeChangeSetRequest @@ -106,10 +108,16 @@ type ClientAPI interface { DescribeStacksRequest(*cloudformation.DescribeStacksInput) cloudformation.DescribeStacksRequest + DescribeTypeRequest(*cloudformation.DescribeTypeInput) cloudformation.DescribeTypeRequest + + DescribeTypeRegistrationRequest(*cloudformation.DescribeTypeRegistrationInput) cloudformation.DescribeTypeRegistrationRequest + DetectStackDriftRequest(*cloudformation.DetectStackDriftInput) cloudformation.DetectStackDriftRequest DetectStackResourceDriftRequest(*cloudformation.DetectStackResourceDriftInput) cloudformation.DetectStackResourceDriftRequest + DetectStackSetDriftRequest(*cloudformation.DetectStackSetDriftInput) cloudformation.DetectStackSetDriftRequest + EstimateTemplateCostRequest(*cloudformation.EstimateTemplateCostInput) cloudformation.EstimateTemplateCostRequest ExecuteChangeSetRequest(*cloudformation.ExecuteChangeSetInput) cloudformation.ExecuteChangeSetRequest @@ -138,8 +146,20 @@ type ClientAPI interface { ListStacksRequest(*cloudformation.ListStacksInput) cloudformation.ListStacksRequest + ListTypeRegistrationsRequest(*cloudformation.ListTypeRegistrationsInput) cloudformation.ListTypeRegistrationsRequest + + 
ListTypeVersionsRequest(*cloudformation.ListTypeVersionsInput) cloudformation.ListTypeVersionsRequest + + ListTypesRequest(*cloudformation.ListTypesInput) cloudformation.ListTypesRequest + + RecordHandlerProgressRequest(*cloudformation.RecordHandlerProgressInput) cloudformation.RecordHandlerProgressRequest + + RegisterTypeRequest(*cloudformation.RegisterTypeInput) cloudformation.RegisterTypeRequest + SetStackPolicyRequest(*cloudformation.SetStackPolicyInput) cloudformation.SetStackPolicyRequest + SetTypeDefaultVersionRequest(*cloudformation.SetTypeDefaultVersionInput) cloudformation.SetTypeDefaultVersionRequest + SignalResourceRequest(*cloudformation.SignalResourceInput) cloudformation.SignalResourceRequest StopStackSetOperationRequest(*cloudformation.StopStackSetOperationInput) cloudformation.StopStackSetOperationRequest @@ -165,6 +185,8 @@ type ClientAPI interface { WaitUntilStackImportComplete(context.Context, *cloudformation.DescribeStacksInput, ...aws.WaiterOption) error WaitUntilStackUpdateComplete(context.Context, *cloudformation.DescribeStacksInput, ...aws.WaiterOption) error + + WaitUntilTypeRegistrationComplete(context.Context, *cloudformation.DescribeTypeRegistrationInput, ...aws.WaiterOption) error } var _ ClientAPI = (*cloudformation.Client)(nil) diff --git a/service/cloudsearch/api_enums.go b/service/cloudsearch/api_enums.go index b3f562a6dea..082e9d75f8d 100644 --- a/service/cloudsearch/api_enums.go +++ b/service/cloudsearch/api_enums.go @@ -176,3 +176,21 @@ func (enum SuggesterFuzzyMatching) MarshalValueBuf(b []byte) ([]byte, error) { b = b[0:0] return append(b, enum...), nil } + +// The minimum required TLS version. 
+type TLSSecurityPolicy string + +// Enum values for TLSSecurityPolicy +const ( + TLSSecurityPolicyPolicyMinTls10201907 TLSSecurityPolicy = "Policy-Min-TLS-1-0-2019-07" + TLSSecurityPolicyPolicyMinTls12201907 TLSSecurityPolicy = "Policy-Min-TLS-1-2-2019-07" +) + +func (enum TLSSecurityPolicy) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TLSSecurityPolicy) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/cloudsearch/api_errors.go b/service/cloudsearch/api_errors.go index f0ed2f1e2fc..b807f29ab04 100644 --- a/service/cloudsearch/api_errors.go +++ b/service/cloudsearch/api_errors.go @@ -41,4 +41,10 @@ const ( // The request was rejected because it attempted to reference a resource that // does not exist. ErrCodeResourceNotFoundException = "ResourceNotFound" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // The request was rejected because it has invalid parameters. + ErrCodeValidationException = "ValidationException" ) diff --git a/service/cloudsearch/api_integ_test.go b/service/cloudsearch/api_integ_test.go new file mode 100644 index 00000000000..d582f55c799 --- /dev/null +++ b/service/cloudsearch/api_integ_test.go @@ -0,0 +1,63 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +// +build integration + +package cloudsearch_test + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + "github.com/aws/aws-sdk-go-v2/internal/awstesting/integration" + "github.com/aws/aws-sdk-go-v2/service/cloudsearch" +) + +var _ aws.Config +var _ awserr.Error + +func TestInteg_00_DescribeDomains(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + cfg := integration.ConfigWithDefaultRegion("us-west-2") + svc := cloudsearch.New(cfg) + params := &cloudsearch.DescribeDomainsInput{} + + req := svc.DescribeDomainsRequest(params) + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) + _, err := req.Send(ctx) + if err != nil { + t.Errorf("expect no error, got %v", err) + } +} +func TestInteg_01_DescribeIndexFields(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + cfg := integration.ConfigWithDefaultRegion("us-west-2") + svc := cloudsearch.New(cfg) + params := &cloudsearch.DescribeIndexFieldsInput{ + DomainName: aws.String("fakedomain"), + } + + req := svc.DescribeIndexFieldsRequest(params) + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) + _, err := req.Send(ctx) + if err == nil { + t.Fatalf("expect request to fail") + } + aerr, ok := err.(awserr.RequestFailure) + if !ok { + t.Fatalf("expect awserr, was %T", err) + } + if len(aerr.Code()) == 0 { + t.Errorf("expect non-empty error code") + } + if v := aerr.Code(); v == aws.ErrCodeSerialization { + t.Errorf("expect API error code got serialization failure") + } +} diff --git a/service/cloudsearch/api_op_DescribeDomainEndpointOptions.go b/service/cloudsearch/api_op_DescribeDomainEndpointOptions.go new file mode 100644 index 00000000000..a03bcbc6063 --- /dev/null +++ 
b/service/cloudsearch/api_op_DescribeDomainEndpointOptions.go @@ -0,0 +1,131 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudsearch + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Container for the parameters to the DescribeDomainEndpointOptions operation. +// Specify the name of the domain you want to describe. To show the active configuration +// and exclude any pending changes, set the Deployed option to true. +type DescribeDomainEndpointOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to retrieve the latest configuration (which might be in a Processing + // state) or the current, active configuration. Defaults to false. + Deployed *bool `type:"boolean"` + + // A string that represents the name of a domain. + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDomainEndpointOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDomainEndpointOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDomainEndpointOptionsInput"} + + if s.DomainName == nil { + invalidParams.Add(aws.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeDomainEndpointOptions request. Contains the status +// and configuration of a search domain's endpoint options. +type DescribeDomainEndpointOptionsOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of a search domain's endpoint options. 
+ DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s DescribeDomainEndpointOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeDomainEndpointOptions = "DescribeDomainEndpointOptions" + +// DescribeDomainEndpointOptionsRequest returns a request value for making API operation for +// Amazon CloudSearch. +// +// Returns the domain's endpoint options, specifically whether all requests +// to the domain must arrive over HTTPS. For more information, see Configuring +// Domain Endpoint Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html) +// in the Amazon CloudSearch Developer Guide. +// +// // Example sending a request using DescribeDomainEndpointOptionsRequest. +// req := client.DescribeDomainEndpointOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) DescribeDomainEndpointOptionsRequest(input *DescribeDomainEndpointOptionsInput) DescribeDomainEndpointOptionsRequest { + op := &aws.Operation{ + Name: opDescribeDomainEndpointOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainEndpointOptionsInput{} + } + + req := c.newRequest(op, input, &DescribeDomainEndpointOptionsOutput{}) + return DescribeDomainEndpointOptionsRequest{Request: req, Input: input, Copy: c.DescribeDomainEndpointOptionsRequest} +} + +// DescribeDomainEndpointOptionsRequest is the request type for the +// DescribeDomainEndpointOptions API operation. +type DescribeDomainEndpointOptionsRequest struct { + *aws.Request + Input *DescribeDomainEndpointOptionsInput + Copy func(*DescribeDomainEndpointOptionsInput) DescribeDomainEndpointOptionsRequest +} + +// Send marshals and sends the DescribeDomainEndpointOptions API request. 
+func (r DescribeDomainEndpointOptionsRequest) Send(ctx context.Context) (*DescribeDomainEndpointOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDomainEndpointOptionsResponse{ + DescribeDomainEndpointOptionsOutput: r.Request.Data.(*DescribeDomainEndpointOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDomainEndpointOptionsResponse is the response type for the +// DescribeDomainEndpointOptions API operation. +type DescribeDomainEndpointOptionsResponse struct { + *DescribeDomainEndpointOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDomainEndpointOptions request. +func (r *DescribeDomainEndpointOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudsearch/api_op_UpdateDomainEndpointOptions.go b/service/cloudsearch/api_op_UpdateDomainEndpointOptions.go new file mode 100644 index 00000000000..2ffeb6afe72 --- /dev/null +++ b/service/cloudsearch/api_op_UpdateDomainEndpointOptions.go @@ -0,0 +1,138 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudsearch + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Container for the parameters to the UpdateDomainEndpointOptions operation. +// Specifies the name of the domain you want to update and the domain endpoint +// options. +type UpdateDomainEndpointOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to require that all requests to the domain arrive over HTTPS. We + // recommend Policy-Min-TLS-1-2-2019-07 for TLSSecurityPolicy. For compatibility + // with older clients, the default is Policy-Min-TLS-1-0-2019-07. 
+ // + // DomainEndpointOptions is a required field + DomainEndpointOptions *DomainEndpointOptions `type:"structure" required:"true"` + + // A string that represents the name of a domain. + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainEndpointOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDomainEndpointOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDomainEndpointOptionsInput"} + + if s.DomainEndpointOptions == nil { + invalidParams.Add(aws.NewErrParamRequired("DomainEndpointOptions")) + } + + if s.DomainName == nil { + invalidParams.Add(aws.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a UpdateDomainEndpointOptions request. Contains the configuration +// and status of the domain's endpoint options. +type UpdateDomainEndpointOptionsOutput struct { + _ struct{} `type:"structure"` + + // The newly-configured domain endpoint options. + DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s UpdateDomainEndpointOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateDomainEndpointOptions = "UpdateDomainEndpointOptions" + +// UpdateDomainEndpointOptionsRequest returns a request value for making API operation for +// Amazon CloudSearch. +// +// Updates the domain's endpoint options, specifically whether all requests +// to the domain must arrive over HTTPS. 
For more information, see Configuring +// Domain Endpoint Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html) +// in the Amazon CloudSearch Developer Guide. +// +// // Example sending a request using UpdateDomainEndpointOptionsRequest. +// req := client.UpdateDomainEndpointOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) UpdateDomainEndpointOptionsRequest(input *UpdateDomainEndpointOptionsInput) UpdateDomainEndpointOptionsRequest { + op := &aws.Operation{ + Name: opUpdateDomainEndpointOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainEndpointOptionsInput{} + } + + req := c.newRequest(op, input, &UpdateDomainEndpointOptionsOutput{}) + return UpdateDomainEndpointOptionsRequest{Request: req, Input: input, Copy: c.UpdateDomainEndpointOptionsRequest} +} + +// UpdateDomainEndpointOptionsRequest is the request type for the +// UpdateDomainEndpointOptions API operation. +type UpdateDomainEndpointOptionsRequest struct { + *aws.Request + Input *UpdateDomainEndpointOptionsInput + Copy func(*UpdateDomainEndpointOptionsInput) UpdateDomainEndpointOptionsRequest +} + +// Send marshals and sends the UpdateDomainEndpointOptions API request. +func (r UpdateDomainEndpointOptionsRequest) Send(ctx context.Context) (*UpdateDomainEndpointOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDomainEndpointOptionsResponse{ + UpdateDomainEndpointOptionsOutput: r.Request.Data.(*UpdateDomainEndpointOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDomainEndpointOptionsResponse is the response type for the +// UpdateDomainEndpointOptions API operation. 
+type UpdateDomainEndpointOptionsResponse struct { + *UpdateDomainEndpointOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDomainEndpointOptions request. +func (r *UpdateDomainEndpointOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudsearch/api_types.go b/service/cloudsearch/api_types.go index b896e03f753..426447b5570 100644 --- a/service/cloudsearch/api_types.go +++ b/service/cloudsearch/api_types.go @@ -306,6 +306,42 @@ func (s *DocumentSuggesterOptions) Validate() error { return nil } +// The domain's endpoint options. +type DomainEndpointOptions struct { + _ struct{} `type:"structure"` + + // Whether the domain is HTTPS only enabled. + EnforceHTTPS *bool `type:"boolean"` + + // The minimum required TLS version + TLSSecurityPolicy TLSSecurityPolicy `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptions) String() string { + return awsutil.Prettify(s) +} + +// The configuration and status of the domain's endpoint options. +type DomainEndpointOptionsStatus struct { + _ struct{} `type:"structure"` + + // The domain endpoint options configured for the domain. + // + // Options is a required field + Options *DomainEndpointOptions `type:"structure" required:"true"` + + // The status of the configured domain endpoint options. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptionsStatus) String() string { + return awsutil.Prettify(s) +} + // The current status of the search domain. 
type DomainStatus struct { _ struct{} `type:"structure"` diff --git a/service/cloudsearch/cloudsearchiface/interface.go b/service/cloudsearch/cloudsearchiface/interface.go index 9d8cc661608..e3f81be3101 100644 --- a/service/cloudsearch/cloudsearchiface/interface.go +++ b/service/cloudsearch/cloudsearchiface/interface.go @@ -87,6 +87,8 @@ type ClientAPI interface { DescribeAvailabilityOptionsRequest(*cloudsearch.DescribeAvailabilityOptionsInput) cloudsearch.DescribeAvailabilityOptionsRequest + DescribeDomainEndpointOptionsRequest(*cloudsearch.DescribeDomainEndpointOptionsInput) cloudsearch.DescribeDomainEndpointOptionsRequest + DescribeDomainsRequest(*cloudsearch.DescribeDomainsInput) cloudsearch.DescribeDomainsRequest DescribeExpressionsRequest(*cloudsearch.DescribeExpressionsInput) cloudsearch.DescribeExpressionsRequest @@ -105,6 +107,8 @@ type ClientAPI interface { UpdateAvailabilityOptionsRequest(*cloudsearch.UpdateAvailabilityOptionsInput) cloudsearch.UpdateAvailabilityOptionsRequest + UpdateDomainEndpointOptionsRequest(*cloudsearch.UpdateDomainEndpointOptionsInput) cloudsearch.UpdateDomainEndpointOptionsRequest + UpdateScalingParametersRequest(*cloudsearch.UpdateScalingParametersInput) cloudsearch.UpdateScalingParametersRequest UpdateServiceAccessPoliciesRequest(*cloudsearch.UpdateServiceAccessPoliciesInput) cloudsearch.UpdateServiceAccessPoliciesRequest diff --git a/service/cloudtrail/api_enums.go b/service/cloudtrail/api_enums.go index 833e547adc0..55b92bdb589 100644 --- a/service/cloudtrail/api_enums.go +++ b/service/cloudtrail/api_enums.go @@ -2,6 +2,38 @@ package cloudtrail +type EventCategory string + +// Enum values for EventCategory +const ( + EventCategoryInsight EventCategory = "insight" +) + +func (enum EventCategory) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum EventCategory) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type InsightType string + +// Enum values for 
InsightType +const ( + InsightTypeApiCallRateInsight InsightType = "ApiCallRateInsight" +) + +func (enum InsightType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum InsightType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type LookupAttributeKey string // Enum values for LookupAttributeKey diff --git a/service/cloudtrail/api_errors.go b/service/cloudtrail/api_errors.go index 837329e9cce..bc55c48bb89 100644 --- a/service/cloudtrail/api_errors.go +++ b/service/cloudtrail/api_errors.go @@ -28,6 +28,13 @@ const ( // Cannot set a CloudWatch Logs delivery for this region. ErrCodeCloudWatchLogsDeliveryUnavailableException = "CloudWatchLogsDeliveryUnavailableException" + // ErrCodeInsightNotEnabledException for service response error code + // "InsightNotEnabledException". + // + // If you run GetInsightSelectors on a trail that does not have Insights events + // enabled, the operation throws the exception InsightNotEnabledException. + ErrCodeInsightNotEnabledException = "InsightNotEnabledException" + // ErrCodeInsufficientDependencyServiceAccessPermissionException for service response error code // "InsufficientDependencyServiceAccessPermissionException". // @@ -68,6 +75,13 @@ const ( // This exception is thrown when the provided role is not valid. ErrCodeInvalidCloudWatchLogsRoleArnException = "InvalidCloudWatchLogsRoleArnException" + // ErrCodeInvalidEventCategoryException for service response error code + // "InvalidEventCategoryException". + // + // Occurs if an event category that is not valid is specified as a value of + // EventCategory. + ErrCodeInvalidEventCategoryException = "InvalidEventCategoryException" + // ErrCodeInvalidEventSelectorsException for service response error code // "InvalidEventSelectorsException". // @@ -99,6 +113,14 @@ const ( // other than the region in which the trail was created. 
ErrCodeInvalidHomeRegionException = "InvalidHomeRegionException" + // ErrCodeInvalidInsightSelectorsException for service response error code + // "InvalidInsightSelectorsException". + // + // The formatting or syntax of the InsightSelectors JSON statement in your PutInsightSelectors + // or GetInsightSelectors request is not valid, or the specified insight type + // in the InsightSelectors statement is not a valid insight type. + ErrCodeInvalidInsightSelectorsException = "InvalidInsightSelectorsException" + // ErrCodeInvalidKmsKeyIdException for service response error code // "InvalidKmsKeyIdException". // diff --git a/service/cloudtrail/api_op_DescribeTrails.go b/service/cloudtrail/api_op_DescribeTrails.go index 15c3bb300f8..a983922945c 100644 --- a/service/cloudtrail/api_op_DescribeTrails.go +++ b/service/cloudtrail/api_op_DescribeTrails.go @@ -53,7 +53,11 @@ func (s DescribeTrailsInput) String() string { type DescribeTrailsOutput struct { _ struct{} `type:"structure"` - // The list of trail objects. + // The list of trail objects. Trail objects with string values are only returned + // if values for the objects exist in a trail's configuration. For example, + // SNSTopicName and SNSTopicARN are only returned in results if a trail is configured + // to send SNS notifications. Similarly, KMSKeyId only appears in results if + // a trail's log files are encrypted with AWS KMS-managed keys. TrailList []Trail `locationName:"trailList" type:"list"` } diff --git a/service/cloudtrail/api_op_GetInsightSelectors.go b/service/cloudtrail/api_op_GetInsightSelectors.go new file mode 100644 index 00000000000..b66745319d2 --- /dev/null +++ b/service/cloudtrail/api_op_GetInsightSelectors.go @@ -0,0 +1,147 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package cloudtrail + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetInsightSelectorsInput struct { + _ struct{} `type:"structure"` + + // Specifies the name of the trail or trail ARN. If you specify a trail name, + // the string must meet the following requirements: + // + // * Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-) + // + // * Start with a letter or number, and end with a letter or number + // + // * Be between 3 and 128 characters + // + // * Have no adjacent periods, underscores or dashes. Names like my-_namespace + // and my--namespace are not valid. + // + // * Not be in IP address format (for example, 192.168.5.4) + // + // If you specify a trail ARN, it must be in the format: + // + // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // + // TrailName is a required field + TrailName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInsightSelectorsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInsightSelectorsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetInsightSelectorsInput"} + + if s.TrailName == nil { + invalidParams.Add(aws.NewErrParamRequired("TrailName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetInsightSelectorsOutput struct { + _ struct{} `type:"structure"` + + // A JSON string that contains the insight types you want to log on a trail. + // In this release, only ApiCallRateInsight is supported as an insight type. + InsightSelectors []InsightSelector `type:"list"` + + // The Amazon Resource Name (ARN) of a trail for which you want to get Insights + // selectors. 
+ TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s GetInsightSelectorsOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetInsightSelectors = "GetInsightSelectors" + +// GetInsightSelectorsRequest returns a request value for making API operation for +// AWS CloudTrail. +// +// Describes the settings for the Insights event selectors that you configured +// for your trail. GetInsightSelectors shows if CloudTrail Insights event logging +// is enabled on the trail, and if it is, which insight types are enabled. If +// you run GetInsightSelectors on a trail that does not have Insights events +// enabled, the operation throws the exception InsightNotEnabledException +// +// For more information, see Logging CloudTrail Insights Events for Trails (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-insights-events-with-cloudtrail.html) +// in the AWS CloudTrail User Guide. +// +// // Example sending a request using GetInsightSelectorsRequest. +// req := client.GetInsightSelectorsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/GetInsightSelectors +func (c *Client) GetInsightSelectorsRequest(input *GetInsightSelectorsInput) GetInsightSelectorsRequest { + op := &aws.Operation{ + Name: opGetInsightSelectors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInsightSelectorsInput{} + } + + req := c.newRequest(op, input, &GetInsightSelectorsOutput{}) + return GetInsightSelectorsRequest{Request: req, Input: input, Copy: c.GetInsightSelectorsRequest} +} + +// GetInsightSelectorsRequest is the request type for the +// GetInsightSelectors API operation. 
+type GetInsightSelectorsRequest struct { + *aws.Request + Input *GetInsightSelectorsInput + Copy func(*GetInsightSelectorsInput) GetInsightSelectorsRequest +} + +// Send marshals and sends the GetInsightSelectors API request. +func (r GetInsightSelectorsRequest) Send(ctx context.Context) (*GetInsightSelectorsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetInsightSelectorsResponse{ + GetInsightSelectorsOutput: r.Request.Data.(*GetInsightSelectorsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetInsightSelectorsResponse is the response type for the +// GetInsightSelectors API operation. +type GetInsightSelectorsResponse struct { + *GetInsightSelectorsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetInsightSelectors request. +func (r *GetInsightSelectorsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudtrail/api_op_ListTrails.go b/service/cloudtrail/api_op_ListTrails.go index 18f086d8bd5..e2050c90e01 100644 --- a/service/cloudtrail/api_op_ListTrails.go +++ b/service/cloudtrail/api_op_ListTrails.go @@ -12,6 +12,11 @@ import ( type ListTrailsInput struct { _ struct{} `type:"structure"` + // The token to use to get the next page of results after a previous API call. + // This token must be passed in with the same parameters that were specified + // in the the original call. For example, if the original call specified an + // AttributeKey of 'Username' with a value of 'root', the call with NextToken + // should include those same parameters. NextToken *string `type:"string"` } @@ -23,6 +28,11 @@ func (s ListTrailsInput) String() string { type ListTrailsOutput struct { _ struct{} `type:"structure"` + // The token to use to get the next page of results after a previous API call. + // If the token does not appear, there are no more results to return. 
The token + // must be passed in with the same parameters as the previous call. For example, + // if the original call specified an AttributeKey of 'Username' with a value + // of 'root', the call with NextToken should include those same parameters. NextToken *string `type:"string"` // Returns the name, ARN, and home region of trails in the current account. diff --git a/service/cloudtrail/api_op_LookupEvents.go b/service/cloudtrail/api_op_LookupEvents.go index 17447047155..7008be0059b 100644 --- a/service/cloudtrail/api_op_LookupEvents.go +++ b/service/cloudtrail/api_op_LookupEvents.go @@ -20,6 +20,12 @@ type LookupEventsInput struct { // error is returned. EndTime *time.Time `type:"timestamp"` + // Specifies the event category. If you do not specify an event category, events + // of the category are not returned in the response. For example, if you do + // not specify insight as the value of EventCategory, no Insights events are + // returned. + EventCategory EventCategory `type:"string" enum:"true"` + // Contains a list of lookup attributes. Currently the list can contain only // one item. LookupAttributes []LookupAttribute `type:"list"` @@ -94,8 +100,10 @@ const opLookupEvents = "LookupEvents" // AWS CloudTrail. // // Looks up management events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events) -// captured by CloudTrail. You can look up events that occurred in a region -// within the last 90 days. Lookup supports the following attributes: +// or CloudTrail Insights events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-insights-events) +// that are captured by CloudTrail. You can look up events that occurred in +// a region within the last 90 days. 
Lookup supports the following attributes +// for management events: // // * AWS access key // @@ -113,16 +121,21 @@ const opLookupEvents = "LookupEvents" // // * User name // +// Lookup supports the following attributes for Insights events: +// +// * Event ID +// +// * Event name +// +// * Event source +// // All attributes are optional. The default number of results returned is 50, // with a maximum of 50 possible. The response includes a token that you can // use to get the next page of results. // -// The rate of lookup requests is limited to one per second per account. If +// The rate of lookup requests is limited to two per second per account. If // this limit is exceeded, a throttling error occurs. // -// Events that occurred during the selected time range will not be available -// for lookup if CloudTrail logging was not enabled when the events occurred. -// // // Example sending a request using LookupEventsRequest. // req := client.LookupEventsRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/cloudtrail/api_op_PutInsightSelectors.go b/service/cloudtrail/api_op_PutInsightSelectors.go new file mode 100644 index 00000000000..659f69f3045 --- /dev/null +++ b/service/cloudtrail/api_op_PutInsightSelectors.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudtrail + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type PutInsightSelectorsInput struct { + _ struct{} `type:"structure"` + + // A JSON string that contains the insight types you want to log on a trail. + // In this release, only ApiCallRateInsight is supported as an insight type. + // + // InsightSelectors is a required field + InsightSelectors []InsightSelector `type:"list" required:"true"` + + // The name of the CloudTrail trail for which you want to change or add Insights + // selectors. 
+ // + // TrailName is a required field + TrailName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutInsightSelectorsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutInsightSelectorsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutInsightSelectorsInput"} + + if s.InsightSelectors == nil { + invalidParams.Add(aws.NewErrParamRequired("InsightSelectors")) + } + + if s.TrailName == nil { + invalidParams.Add(aws.NewErrParamRequired("TrailName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutInsightSelectorsOutput struct { + _ struct{} `type:"structure"` + + // A JSON string that contains the insight types you want to log on a trail. + // In this release, only ApiCallRateInsight is supported as an insight type. + InsightSelectors []InsightSelector `type:"list"` + + // The Amazon Resource Name (ARN) of a trail for which you want to change or + // add Insights selectors. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s PutInsightSelectorsOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutInsightSelectors = "PutInsightSelectors" + +// PutInsightSelectorsRequest returns a request value for making API operation for +// AWS CloudTrail. +// +// Lets you enable Insights event logging by specifying the Insights selectors +// that you want to enable on an existing trail. You also use PutInsightSelectors +// to turn off Insights event logging, by passing an empty list of insight types. +// In this release, only ApiCallRateInsight is supported as an Insights selector. +// +// // Example sending a request using PutInsightSelectorsRequest. 
+// req := client.PutInsightSelectorsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/PutInsightSelectors +func (c *Client) PutInsightSelectorsRequest(input *PutInsightSelectorsInput) PutInsightSelectorsRequest { + op := &aws.Operation{ + Name: opPutInsightSelectors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutInsightSelectorsInput{} + } + + req := c.newRequest(op, input, &PutInsightSelectorsOutput{}) + return PutInsightSelectorsRequest{Request: req, Input: input, Copy: c.PutInsightSelectorsRequest} +} + +// PutInsightSelectorsRequest is the request type for the +// PutInsightSelectors API operation. +type PutInsightSelectorsRequest struct { + *aws.Request + Input *PutInsightSelectorsInput + Copy func(*PutInsightSelectorsInput) PutInsightSelectorsRequest +} + +// Send marshals and sends the PutInsightSelectors API request. +func (r PutInsightSelectorsRequest) Send(ctx context.Context) (*PutInsightSelectorsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutInsightSelectorsResponse{ + PutInsightSelectorsOutput: r.Request.Data.(*PutInsightSelectorsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutInsightSelectorsResponse is the response type for the +// PutInsightSelectors API operation. +type PutInsightSelectorsResponse struct { + *PutInsightSelectorsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutInsightSelectors request. 
+func (r *PutInsightSelectorsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudtrail/api_types.go b/service/cloudtrail/api_types.go index fe1b97ec65b..569a1bca112 100644 --- a/service/cloudtrail/api_types.go +++ b/service/cloudtrail/api_types.go @@ -13,8 +13,8 @@ var _ aws.Config var _ = awsutil.Prettify // The Amazon S3 buckets or AWS Lambda functions that you specify in your event -// selectors for your trail to log data events. Data events provide insight -// into the resource operations performed on or within a resource itself. These +// selectors for your trail to log data events. Data events provide information +// about the resource operations performed on or within a resource itself. These // are also known as data plane operations. You can specify up to 250 data resources // for a trail. // @@ -162,6 +162,14 @@ type EventSelector struct { // in the AWS CloudTrail User Guide. DataResources []DataResource `type:"list"` + // An optional list of service event sources from which you do not want management + // events to be logged on your trail. In this release, the list can be empty + // (disables the filter), or it can filter out AWS Key Management Service events + // by containing "kms.amazonaws.com". By default, ExcludeManagementEventSources + // is empty, and AWS KMS events are included in events that are logged to your + // trail. + ExcludeManagementEventSources []string `type:"list"` + // Specify if you want your event selector to include management events for // your trail. // @@ -184,6 +192,21 @@ func (s EventSelector) String() string { return awsutil.Prettify(s) } +// A JSON string that contains a list of insight types that are logged on a +// trail. +type InsightSelector struct { + _ struct{} `type:"structure"` + + // The type of insights to log on a trail. In this release, only ApiCallRateInsight + // is supported as an insight type. 
+ InsightType InsightType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s InsightSelector) String() string { + return awsutil.Prettify(s) +} + // Specifies an attribute and value that filter the events returned. type LookupAttribute struct { _ struct{} `type:"structure"` @@ -333,6 +356,10 @@ type Trail struct { // Specifies if the trail has custom event selectors. HasCustomEventSelectors *bool `type:"boolean"` + // Specifies whether a trail has insight types specified in an InsightSelector + // list. + HasInsightSelectors *bool `type:"boolean"` + // The region in which the trail was created. HomeRegion *string `type:"string"` diff --git a/service/cloudtrail/cloudtrailiface/interface.go b/service/cloudtrail/cloudtrailiface/interface.go index c913fe19782..809f72aee13 100644 --- a/service/cloudtrail/cloudtrailiface/interface.go +++ b/service/cloudtrail/cloudtrailiface/interface.go @@ -71,6 +71,8 @@ type ClientAPI interface { GetEventSelectorsRequest(*cloudtrail.GetEventSelectorsInput) cloudtrail.GetEventSelectorsRequest + GetInsightSelectorsRequest(*cloudtrail.GetInsightSelectorsInput) cloudtrail.GetInsightSelectorsRequest + GetTrailRequest(*cloudtrail.GetTrailInput) cloudtrail.GetTrailRequest GetTrailStatusRequest(*cloudtrail.GetTrailStatusInput) cloudtrail.GetTrailStatusRequest @@ -85,6 +87,8 @@ type ClientAPI interface { PutEventSelectorsRequest(*cloudtrail.PutEventSelectorsInput) cloudtrail.PutEventSelectorsRequest + PutInsightSelectorsRequest(*cloudtrail.PutInsightSelectorsInput) cloudtrail.PutInsightSelectorsRequest + RemoveTagsRequest(*cloudtrail.RemoveTagsInput) cloudtrail.RemoveTagsRequest StartLoggingRequest(*cloudtrail.StartLoggingInput) cloudtrail.StartLoggingRequest diff --git a/service/cloudwatchlogs/api_op_CreateLogGroup.go b/service/cloudwatchlogs/api_op_CreateLogGroup.go index a5b891cdf23..c9ae1eb85cb 100644 --- a/service/cloudwatchlogs/api_op_CreateLogGroup.go +++ 
b/service/cloudwatchlogs/api_op_CreateLogGroup.go @@ -69,7 +69,7 @@ const opCreateLogGroup = "CreateLogGroup" // // Creates a log group with the specified name. // -// You can create up to 5000 log groups per account. +// You can create up to 20,000 log groups per account. // // You must use the following guidelines when naming a log group: // @@ -78,7 +78,8 @@ const opCreateLogGroup = "CreateLogGroup" // * Log group names can be between 1 and 512 characters long. // // * Log group names consist of the following characters: a-z, A-Z, 0-9, -// '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). +// '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and +// '#' (number sign) // // If you associate a AWS Key Management Service (AWS KMS) customer master key // (CMK) with the log group, ingested data is encrypted using the CMK. This diff --git a/service/cloudwatchlogs/api_op_GetLogEvents.go b/service/cloudwatchlogs/api_op_GetLogEvents.go index 3367738de72..2fb020e6ab9 100644 --- a/service/cloudwatchlogs/api_op_GetLogEvents.go +++ b/service/cloudwatchlogs/api_op_GetLogEvents.go @@ -34,6 +34,8 @@ type GetLogEventsInput struct { // The token for the next set of items to return. (You received this token from // a previous call.) + // + // Using this token works only when you specify true for startFromHead. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // If the value is true, the earliest log events are returned first. If the diff --git a/service/cloudwatchlogs/api_op_PutDestination.go b/service/cloudwatchlogs/api_op_PutDestination.go index 020dcc44937..d3f12f578e6 100644 --- a/service/cloudwatchlogs/api_op_PutDestination.go +++ b/service/cloudwatchlogs/api_op_PutDestination.go @@ -82,11 +82,12 @@ const opPutDestination = "PutDestination" // PutDestinationRequest returns a request value for making API operation for // Amazon CloudWatch Logs. // -// Creates or updates a destination. 
A destination encapsulates a physical resource -// (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time -// stream of log events for a different account, ingested using PutLogEvents. -// A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose -// strea, or an AWS Lambda function. +// Creates or updates a destination. This operation is used only to create destinations +// for cross-account subscriptions. +// +// A destination encapsulates a physical resource (such as an Amazon Kinesis +// stream) and enables you to subscribe to a real-time stream of log events +// for a different account, ingested using PutLogEvents. // // Through an access policy, a destination controls what is written to it. By // default, PutDestination does not set any access policy with the destination, diff --git a/service/cloudwatchlogs/api_op_StartQuery.go b/service/cloudwatchlogs/api_op_StartQuery.go index 41b966a3e56..e582de48625 100644 --- a/service/cloudwatchlogs/api_op_StartQuery.go +++ b/service/cloudwatchlogs/api_op_StartQuery.go @@ -21,6 +21,7 @@ type StartQueryInput struct { // The maximum number of log events to return in the query. If the query string // uses the fields command, only the specified fields and their values are returned. + // The default is 1000. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The log group on which to perform the query. diff --git a/service/cloudwatchlogs/api_types.go b/service/cloudwatchlogs/api_types.go index 0465be0b7ec..864332282dc 100644 --- a/service/cloudwatchlogs/api_types.go +++ b/service/cloudwatchlogs/api_types.go @@ -267,9 +267,9 @@ type LogStream struct { // The number of bytes stored. // - // IMPORTANT: Starting on June 17, 2019, this parameter will be deprecated for - // log streams, and will be reported as zero. This change applies only to log - // streams. The storedBytes parameter for log groups is not affected. 
+ // IMPORTANT:On June 17, 2019, this parameter was deprecated for log streams, + // and is always reported as zero. This change applies only to log streams. + // The storedBytes parameter for log groups is not affected. StoredBytes *int64 `locationName:"storedBytes" deprecated:"true" type:"long"` // The sequence token. diff --git a/service/codebuild/api_doc.go b/service/codebuild/api_doc.go index 053accb8d46..b913de1bbfe 100644 --- a/service/codebuild/api_doc.go +++ b/service/codebuild/api_doc.go @@ -18,6 +18,8 @@ // // * BatchDeleteBuilds: Deletes one or more builds. // +// * BatchGetBuilds: Gets information about one or more builds. +// // * BatchGetProjects: Gets information about one or more build projects. // A build project defines how AWS CodeBuild runs a build. This includes // information such as where to get the source code to build, the build environment @@ -33,21 +35,21 @@ // CodeBuild to start rebuilding the source code every time a code change // is pushed to the repository. // -// * UpdateWebhook: Changes the settings of an existing webhook. -// // * DeleteProject: Deletes a build project. // +// * DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, +// or Bitbucket source credentials. +// // * DeleteWebhook: For an existing AWS CodeBuild build project that has // its source code stored in a GitHub or Bitbucket repository, stops AWS // CodeBuild from rebuilding the source code every time a code change is // pushed to the repository. // -// * ListProjects: Gets a list of build project names, with each build project -// name representing a single build project. -// -// * UpdateProject: Changes the settings of an existing build project. +// * ImportSourceCredentials: Imports the source repository credentials for +// an AWS CodeBuild project that has its source code stored in a GitHub, +// GitHub Enterprise, or Bitbucket repository. // -// * BatchGetBuilds: Gets information about one or more builds. 
+// * InvalidateProjectCache: Resets the cache for a project. // // * ListBuilds: Gets a list of build IDs, with each build ID representing // a single build. @@ -55,24 +57,24 @@ // * ListBuildsForProject: Gets a list of build IDs for the specified build // project, with each build ID representing a single build. // -// * StartBuild: Starts running a build. -// -// * StopBuild: Attempts to stop running a build. -// // * ListCuratedEnvironmentImages: Gets information about Docker images that // are managed by AWS CodeBuild. // -// * DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, -// or Bitbucket source credentials. -// -// * ImportSourceCredentials: Imports the source repository credentials for -// an AWS CodeBuild project that has its source code stored in a GitHub, -// GitHub Enterprise, or Bitbucket repository. +// * ListProjects: Gets a list of build project names, with each build project +// name representing a single build project. // // * ListSourceCredentials: Returns a list of SourceCredentialsInfo objects. // Each SourceCredentialsInfo object includes the authentication type, token // ARN, and type of source provider for one set of credentials. // +// * StartBuild: Starts running a build. +// +// * StopBuild: Attempts to stop running a build. +// +// * UpdateProject: Changes the settings of an existing build project. +// +// * UpdateWebhook: Changes the settings of an existing webhook. +// // See https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06 for more information on this service. // // See codebuild package documentation for more information. 
diff --git a/service/codebuild/api_enums.go b/service/codebuild/api_enums.go index 049653f0841..2d643b080d5 100644 --- a/service/codebuild/api_enums.go +++ b/service/codebuild/api_enums.go @@ -138,9 +138,10 @@ type ComputeType string // Enum values for ComputeType const ( - ComputeTypeBuildGeneral1Small ComputeType = "BUILD_GENERAL1_SMALL" - ComputeTypeBuildGeneral1Medium ComputeType = "BUILD_GENERAL1_MEDIUM" - ComputeTypeBuildGeneral1Large ComputeType = "BUILD_GENERAL1_LARGE" + ComputeTypeBuildGeneral1Small ComputeType = "BUILD_GENERAL1_SMALL" + ComputeTypeBuildGeneral1Medium ComputeType = "BUILD_GENERAL1_MEDIUM" + ComputeTypeBuildGeneral1Large ComputeType = "BUILD_GENERAL1_LARGE" + ComputeTypeBuildGeneral12xlarge ComputeType = "BUILD_GENERAL1_2XLARGE" ) func (enum ComputeType) MarshalValue() (string, error) { @@ -172,8 +173,10 @@ type EnvironmentType string // Enum values for EnvironmentType const ( - EnvironmentTypeWindowsContainer EnvironmentType = "WINDOWS_CONTAINER" - EnvironmentTypeLinuxContainer EnvironmentType = "LINUX_CONTAINER" + EnvironmentTypeWindowsContainer EnvironmentType = "WINDOWS_CONTAINER" + EnvironmentTypeLinuxContainer EnvironmentType = "LINUX_CONTAINER" + EnvironmentTypeLinuxGpuContainer EnvironmentType = "LINUX_GPU_CONTAINER" + EnvironmentTypeArmContainer EnvironmentType = "ARM_CONTAINER" ) func (enum EnvironmentType) MarshalValue() (string, error) { diff --git a/service/codebuild/api_op_BatchGetBuilds.go b/service/codebuild/api_op_BatchGetBuilds.go index 81f0c032437..a52dddd32d0 100644 --- a/service/codebuild/api_op_BatchGetBuilds.go +++ b/service/codebuild/api_op_BatchGetBuilds.go @@ -60,7 +60,7 @@ const opBatchGetBuilds = "BatchGetBuilds" // BatchGetBuildsRequest returns a request value for making API operation for // AWS CodeBuild. // -// Gets information about builds. +// Gets information about one or more builds. // // // Example sending a request using BatchGetBuildsRequest. 
// req := client.BatchGetBuildsRequest(params) diff --git a/service/codebuild/api_op_BatchGetProjects.go b/service/codebuild/api_op_BatchGetProjects.go index d16bc58defe..3e01210bfcc 100644 --- a/service/codebuild/api_op_BatchGetProjects.go +++ b/service/codebuild/api_op_BatchGetProjects.go @@ -60,7 +60,7 @@ const opBatchGetProjects = "BatchGetProjects" // BatchGetProjectsRequest returns a request value for making API operation for // AWS CodeBuild. // -// Gets information about build projects. +// Gets information about one or more build projects. // // // Example sending a request using BatchGetProjectsRequest. // req := client.BatchGetProjectsRequest(params) diff --git a/service/codebuild/api_types.go b/service/codebuild/api_types.go index 33faae8eaad..9d546487228 100644 --- a/service/codebuild/api_types.go +++ b/service/codebuild/api_types.go @@ -966,7 +966,23 @@ type ProjectEnvironment struct { // // * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. // - // * BUILD_GENERAL1_LARGE: Use up to 15 GB memory and 8 vCPUs for builds. + // * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, + // depending on your environment type. + // + // * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB + // of SSD storage for builds. This compute type supports Docker images up + // to 100 GB uncompressed. + // + // If you use BUILD_GENERAL1_LARGE: + // + // * For environment type LINUX_CONTAINER, you can use up to 15 GB memory + // and 8 vCPUs for builds. + // + // * For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, + // 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. + // + // * For environment type ARM_CONTAINER, you can use up to 16 GB memory and + // 8 vCPUs on ARM-based processors for builds. // // For more information, see Build Environment Compute Types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) // in the AWS CodeBuild User Guide. 
@@ -1036,6 +1052,22 @@ type ProjectEnvironment struct { // The type of build environment to use for related builds. // + // * The environment type ARM_CONTAINER is available only in regions US East + // (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific + // (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt). + // + // * The environment type LINUX_CONTAINER with compute type build.general1.2xlarge + // is available only in regions US East (N. Virginia), US East (N. Virginia), + // US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), + // Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), + // Asia Pacific (Sydney), China (Beijing), and China (Ningxia). + // + // * The environment type LINUX_GPU_CONTAINER is available only in regions + // US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada + // (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), + // Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) + // , China (Beijing), and China (Ningxia). + // // Type is a required field Type EnvironmentType `locationName:"type" type:"string" required:"true" enum:"true"` } diff --git a/service/codecommit/api_doc.go b/service/codecommit/api_doc.go index a0b88e6f94e..b020c00c83b 100644 --- a/service/codecommit/api_doc.go +++ b/service/codecommit/api_doc.go @@ -27,12 +27,12 @@ // the repository. // // * UpdateRepositoryName, which changes the name of the repository. If you -// change the name of a repository, no other users of that repository will -// be able to access it until you send them the new HTTPS or SSH URL to use. +// change the name of a repository, no other users of that repository can +// access it until you send them the new HTTPS or SSH URL to use. // // Branches, by calling the following: // -// * CreateBranch, which creates a new branch in a specified repository. 
+// * CreateBranch, which creates a branch in a specified repository. // // * DeleteBranch, which deletes the specified branch in a repository unless // it is the default branch. @@ -49,7 +49,7 @@ // branch. // // * GetBlob, which returns the base-64 encoded content of an individual -// Git blob object within a repository. +// Git blob object in a repository. // // * GetFile, which returns the base-64 encoded content of a specified file. // @@ -61,7 +61,7 @@ // Commits, by calling the following: // // * BatchGetCommits, which returns information about one or more commits -// in a repository +// in a repository. // // * CreateCommit, which creates a commit for changes to a repository. // @@ -69,7 +69,7 @@ // messages and author and committer information. // // * GetDifferences, which returns information about the differences in a -// valid commit specifier (such as a branch, tag, HEAD, commit ID or other +// valid commit specifier (such as a branch, tag, HEAD, commit ID, or other // fully qualified reference). // // Merges, by calling the following: @@ -107,14 +107,31 @@ // // * CreatePullRequest, which creates a pull request in a specified repository. // +// * CreatePullRequestApprovalRule, which creates an approval rule for a +// specified pull request. +// +// * DeletePullRequestApprovalRule, which deletes an approval rule for a +// specified pull request. +// // * DescribePullRequestEvents, which returns information about one or more // pull request events. // +// * EvaluatePullRequestApprovalRules, which evaluates whether a pull request +// has met all the conditions specified in its associated approval rules. +// // * GetCommentsForPullRequest, which returns information about comments // on a specified pull request. // // * GetPullRequest, which returns information about a specified pull request. // +// * GetPullRequestApprovalStates, which returns information about the approval +// states for a specified pull request. 
+// +// * GetPullRequestOverrideState, which returns information about whether +// approval rules have been set aside (overridden) for a pull request, and +// if so, the Amazon Resource Name (ARN) of the user or identity that overrode +// the rules and their requirements for the pull request. +// // * ListPullRequests, which lists all pull requests for a repository. // // * MergePullRequestByFastForward, which merges the source destination branch @@ -129,9 +146,18 @@ // of a pull request into the specified destination branch for that pull // request using the three-way merge option. // +// * OverridePullRequestApprovalRules, which sets aside all approval rule +// requirements for a pull request. +// // * PostCommentForPullRequest, which posts a comment to a pull request at // the specified line, file, or request. // +// * UpdatePullRequestApprovalRuleContent, which updates the structure of +// an approval rule for a pull request. +// +// * UpdatePullRequestApprovalState, which updates the state of an approval +// on a pull request. +// // * UpdatePullRequestDescription, which updates the description of a pull // request. // @@ -139,6 +165,58 @@ // // * UpdatePullRequestTitle, which updates the title of a pull request. // +// Approval rule templates, by calling the following: +// +// * AssociateApprovalRuleTemplateWithRepository, which associates a template +// with a specified repository. After the template is associated with a repository, +// AWS CodeCommit creates approval rules that match the template conditions +// on every pull request created in the specified repository. +// +// * BatchAssociateApprovalRuleTemplateWithRepositories, which associates +// a template with one or more specified repositories. After the template +// is associated with a repository, AWS CodeCommit creates approval rules +// that match the template conditions on every pull request created in the +// specified repositories. 
+// +// * BatchDisassociateApprovalRuleTemplateFromRepositories, which removes +// the association between a template and specified repositories so that +// approval rules based on the template are not automatically created when +// pull requests are created in those repositories. +// +// * CreateApprovalRuleTemplate, which creates a template for approval rules +// that can then be associated with one or more repositories in your AWS +// account. +// +// * DeleteApprovalRuleTemplate, which deletes the specified template. It +// does not remove approval rules on pull requests already created with the +// template. +// +// * DisassociateApprovalRuleTemplateFromRepository, which removes the association +// between a template and a repository so that approval rules based on the +// template are not automatically created when pull requests are created +// in the specified repository. +// +// * GetApprovalRuleTemplate, which returns information about an approval +// rule template. +// +// * ListApprovalRuleTemplates, which lists all approval rule templates in +// the AWS Region in your AWS account. +// +// * ListAssociatedApprovalRuleTemplatesForRepository, which lists all approval +// rule templates that are associated with a specified repository. +// +// * ListRepositoriesForApprovalRuleTemplate, which lists all repositories +// associated with the specified approval rule template. +// +// * UpdateApprovalRuleTemplateDescription, which updates the description +// of an approval rule template. +// +// * UpdateApprovalRuleTemplateName, which updates the name of an approval +// rule template. +// +// * UpdateApprovalRuleTemplateContent, which updates the content of an approval +// rule template. 
+// // Comments in a repository, by calling the following: // // * DeleteCommentContent, which deletes the content of a comment on a commit diff --git a/service/codecommit/api_enums.go b/service/codecommit/api_enums.go index 6d1cc61163d..9ea358acfec 100644 --- a/service/codecommit/api_enums.go +++ b/service/codecommit/api_enums.go @@ -2,6 +2,23 @@ package codecommit +type ApprovalState string + +// Enum values for ApprovalState +const ( + ApprovalStateApprove ApprovalState = "APPROVE" + ApprovalStateRevoke ApprovalState = "REVOKE" +) + +func (enum ApprovalState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ApprovalState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ChangeTypeEnum string // Enum values for ChangeTypeEnum @@ -128,6 +145,23 @@ func (enum OrderEnum) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type OverrideStatus string + +// Enum values for OverrideStatus +const ( + OverrideStatusOverride OverrideStatus = "OVERRIDE" + OverrideStatusRevoke OverrideStatus = "REVOKE" +) + +func (enum OverrideStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum OverrideStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type PullRequestEventType string // Enum values for PullRequestEventType @@ -136,6 +170,11 @@ const ( PullRequestEventTypePullRequestStatusChanged PullRequestEventType = "PULL_REQUEST_STATUS_CHANGED" PullRequestEventTypePullRequestSourceReferenceUpdated PullRequestEventType = "PULL_REQUEST_SOURCE_REFERENCE_UPDATED" PullRequestEventTypePullRequestMergeStateChanged PullRequestEventType = "PULL_REQUEST_MERGE_STATE_CHANGED" + PullRequestEventTypePullRequestApprovalRuleCreated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_CREATED" + PullRequestEventTypePullRequestApprovalRuleUpdated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_UPDATED" + 
PullRequestEventTypePullRequestApprovalRuleDeleted PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_DELETED" + PullRequestEventTypePullRequestApprovalRuleOverridden PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN" + PullRequestEventTypePullRequestApprovalStateChanged PullRequestEventType = "PULL_REQUEST_APPROVAL_STATE_CHANGED" ) func (enum PullRequestEventType) MarshalValue() (string, error) { diff --git a/service/codecommit/api_errors.go b/service/codecommit/api_errors.go index c9c7bf90279..9320c2e95d1 100644 --- a/service/codecommit/api_errors.go +++ b/service/codecommit/api_errors.go @@ -10,6 +10,75 @@ const ( // The specified Amazon Resource Name (ARN) does not exist in the AWS account. ErrCodeActorDoesNotExistException = "ActorDoesNotExistException" + // ErrCodeApprovalRuleContentRequiredException for service response error code + // "ApprovalRuleContentRequiredException". + // + // The content for the approval rule is empty. You must provide some content + // for an approval rule. The content cannot be null. + ErrCodeApprovalRuleContentRequiredException = "ApprovalRuleContentRequiredException" + + // ErrCodeApprovalRuleDoesNotExistException for service response error code + // "ApprovalRuleDoesNotExistException". + // + // The specified approval rule does not exist. + ErrCodeApprovalRuleDoesNotExistException = "ApprovalRuleDoesNotExistException" + + // ErrCodeApprovalRuleNameAlreadyExistsException for service response error code + // "ApprovalRuleNameAlreadyExistsException". + // + // An approval rule with that name already exists. Approval rule names must + // be unique within the scope of a pull request. + ErrCodeApprovalRuleNameAlreadyExistsException = "ApprovalRuleNameAlreadyExistsException" + + // ErrCodeApprovalRuleNameRequiredException for service response error code + // "ApprovalRuleNameRequiredException". + // + // An approval rule name is required, but was not specified. 
+ ErrCodeApprovalRuleNameRequiredException = "ApprovalRuleNameRequiredException" + + // ErrCodeApprovalRuleTemplateContentRequiredException for service response error code + // "ApprovalRuleTemplateContentRequiredException". + // + // The content for the approval rule template is empty. You must provide some + // content for an approval rule template. The content cannot be null. + ErrCodeApprovalRuleTemplateContentRequiredException = "ApprovalRuleTemplateContentRequiredException" + + // ErrCodeApprovalRuleTemplateDoesNotExistException for service response error code + // "ApprovalRuleTemplateDoesNotExistException". + // + // The specified approval rule template does not exist. Verify that the name + // is correct and that you are signed in to the AWS Region where the template + // was created, and then try again. + ErrCodeApprovalRuleTemplateDoesNotExistException = "ApprovalRuleTemplateDoesNotExistException" + + // ErrCodeApprovalRuleTemplateInUseException for service response error code + // "ApprovalRuleTemplateInUseException". + // + // The approval rule template is associated with one or more repositories. You + // cannot delete a template that is associated with a repository. Remove all + // associations, and then try again. + ErrCodeApprovalRuleTemplateInUseException = "ApprovalRuleTemplateInUseException" + + // ErrCodeApprovalRuleTemplateNameAlreadyExistsException for service response error code + // "ApprovalRuleTemplateNameAlreadyExistsException". + // + // You cannot create an approval rule template with that name because a template + // with that name already exists in this AWS Region for your AWS account. Approval + // rule template names must be unique. + ErrCodeApprovalRuleTemplateNameAlreadyExistsException = "ApprovalRuleTemplateNameAlreadyExistsException" + + // ErrCodeApprovalRuleTemplateNameRequiredException for service response error code + // "ApprovalRuleTemplateNameRequiredException". 
+ // + // An approval rule template name is required, but was not specified. + ErrCodeApprovalRuleTemplateNameRequiredException = "ApprovalRuleTemplateNameRequiredException" + + // ErrCodeApprovalStateRequiredException for service response error code + // "ApprovalStateRequiredException". + // + // An approval state is required, but was not specified. + ErrCodeApprovalStateRequiredException = "ApprovalStateRequiredException" + // ErrCodeAuthorDoesNotExistException for service response error code // "AuthorDoesNotExistException". // @@ -32,7 +101,7 @@ const ( // ErrCodeBlobIdRequiredException for service response error code // "BlobIdRequiredException". // - // A blob ID is required but was not specified. + // A blob ID is required, but was not specified. ErrCodeBlobIdRequiredException = "BlobIdRequiredException" // ErrCodeBranchDoesNotExistException for service response error code @@ -50,17 +119,31 @@ const ( // ErrCodeBranchNameIsTagNameException for service response error code // "BranchNameIsTagNameException". // - // The specified branch name is not valid because it is a tag name. Type the - // name of a current branch in the repository. For a list of valid branch names, - // use ListBranches. + // The specified branch name is not valid because it is a tag name. Enter the + // name of a branch in the repository. For a list of valid branch names, use + // ListBranches. ErrCodeBranchNameIsTagNameException = "BranchNameIsTagNameException" // ErrCodeBranchNameRequiredException for service response error code // "BranchNameRequiredException". // - // A branch name is required but was not specified. + // A branch name is required, but was not specified. ErrCodeBranchNameRequiredException = "BranchNameRequiredException" + // ErrCodeCannotDeleteApprovalRuleFromTemplateException for service response error code + // "CannotDeleteApprovalRuleFromTemplateException". 
+ // + // The approval rule cannot be deleted from the pull request because it was + // created by an approval rule template and applied to the pull request automatically. + ErrCodeCannotDeleteApprovalRuleFromTemplateException = "CannotDeleteApprovalRuleFromTemplateException" + + // ErrCodeCannotModifyApprovalRuleFromTemplateException for service response error code + // "CannotModifyApprovalRuleFromTemplateException". + // + // The approval rule cannot be modified for the pull request because it was + // created by an approval rule template and applied to the pull request automatically. + ErrCodeCannotModifyApprovalRuleFromTemplateException = "CannotModifyApprovalRuleFromTemplateException" + // ErrCodeCommentContentRequiredException for service response error code // "CommentContentRequiredException". // @@ -84,8 +167,8 @@ const ( // ErrCodeCommentDoesNotExistException for service response error code // "CommentDoesNotExistException". // - // No comment exists with the provided ID. Verify that you have provided the - // correct ID, and then try again. + // No comment exists with the provided ID. Verify that you have used the correct + // ID, and then try again. ErrCodeCommentDoesNotExistException = "CommentDoesNotExistException" // ErrCodeCommentIdRequiredException for service response error code @@ -130,6 +213,9 @@ const ( // ErrCodeCommitIdsListRequiredException for service response error code // "CommitIdsListRequiredException". + // + // A list of commit IDs is required, but was either not specified or the list + // was empty. ErrCodeCommitIdsListRequiredException = "CommitIdsListRequiredException" // ErrCodeCommitMessageLengthExceededException for service response error code @@ -204,7 +290,7 @@ const ( // // The commit cannot be created because both a source file and file content // have been specified for the same file. You cannot provide both. Either specify - // a source file, or provide the file content directly. 
+ // a source file or provide the file content directly. ErrCodeFileContentAndSourceFileSpecifiedException = "FileContentAndSourceFileSpecifiedException" // ErrCodeFileContentRequiredException for service response error code @@ -217,16 +303,16 @@ const ( // ErrCodeFileContentSizeLimitExceededException for service response error code // "FileContentSizeLimitExceededException". // - // The file cannot be added because it is too large. The maximum file size that - // can be added is 6 MB, and the combined file content change size is 7 MB. - // Consider making these changes using a Git client. + // The file cannot be added because it is too large. The maximum file size is + // 6 MB, and the combined file content change size is 7 MB. Consider making + // these changes using a Git client. ErrCodeFileContentSizeLimitExceededException = "FileContentSizeLimitExceededException" // ErrCodeFileDoesNotExistException for service response error code // "FileDoesNotExistException". // - // The specified file does not exist. Verify that you have provided the correct - // name of the file, including its full path and extension. + // The specified file does not exist. Verify that you have used the correct + // file name, full path, and extension. ErrCodeFileDoesNotExistException = "FileDoesNotExistException" // ErrCodeFileEntryRequiredException for service response error code @@ -239,8 +325,8 @@ const ( // ErrCodeFileModeRequiredException for service response error code // "FileModeRequiredException". // - // The commit cannot be created because a file mode is required to update mode - // permissions for an existing file, but no file mode has been specified. + // The commit cannot be created because no file mode has been specified. A file + // mode is required to update mode permissions for a file. 
ErrCodeFileModeRequiredException = "FileModeRequiredException" // ErrCodeFileNameConflictsWithDirectoryNameException for service response error code @@ -281,14 +367,14 @@ const ( // "FolderDoesNotExistException". // // The specified folder does not exist. Either the folder name is not correct, - // or you did not provide the full path to the folder. + // or you did not enter the full path to the folder. ErrCodeFolderDoesNotExistException = "FolderDoesNotExistException" // ErrCodeIdempotencyParameterMismatchException for service response error code // "IdempotencyParameterMismatchException". // // The client request token is not valid. Either the token is not in a valid - // format, or the token has been used in a previous request and cannot be re-used. + // format, or the token has been used in a previous request and cannot be reused. ErrCodeIdempotencyParameterMismatchException = "IdempotencyParameterMismatchException" // ErrCodeInvalidActorArnException for service response error code @@ -299,6 +385,47 @@ const ( // and then try again. ErrCodeInvalidActorArnException = "InvalidActorArnException" + // ErrCodeInvalidApprovalRuleContentException for service response error code + // "InvalidApprovalRuleContentException". + // + // The content for the approval rule is not valid. + ErrCodeInvalidApprovalRuleContentException = "InvalidApprovalRuleContentException" + + // ErrCodeInvalidApprovalRuleNameException for service response error code + // "InvalidApprovalRuleNameException". + // + // The name for the approval rule is not valid. + ErrCodeInvalidApprovalRuleNameException = "InvalidApprovalRuleNameException" + + // ErrCodeInvalidApprovalRuleTemplateContentException for service response error code + // "InvalidApprovalRuleTemplateContentException". + // + // The content of the approval rule template is not valid. 
+ ErrCodeInvalidApprovalRuleTemplateContentException = "InvalidApprovalRuleTemplateContentException" + + // ErrCodeInvalidApprovalRuleTemplateDescriptionException for service response error code + // "InvalidApprovalRuleTemplateDescriptionException". + // + // The description for the approval rule template is not valid because it exceeds + // the maximum characters allowed for a description. For more information about + // limits in AWS CodeCommit, see AWS CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). + ErrCodeInvalidApprovalRuleTemplateDescriptionException = "InvalidApprovalRuleTemplateDescriptionException" + + // ErrCodeInvalidApprovalRuleTemplateNameException for service response error code + // "InvalidApprovalRuleTemplateNameException". + // + // The name of the approval rule template is not valid. Template names must + // be between 1 and 100 valid characters in length. For more information about + // limits in AWS CodeCommit, see AWS CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). + ErrCodeInvalidApprovalRuleTemplateNameException = "InvalidApprovalRuleTemplateNameException" + + // ErrCodeInvalidApprovalStateException for service response error code + // "InvalidApprovalStateException". + // + // The state for the approval is not valid. Valid values include APPROVE and + // REVOKE. + ErrCodeInvalidApprovalStateException = "InvalidApprovalStateException" + // ErrCodeInvalidAuthorArnException for service response error code // "InvalidAuthorArnException". // @@ -376,8 +503,8 @@ const ( // ErrCodeInvalidDescriptionException for service response error code // "InvalidDescriptionException". // - // The pull request description is not valid. Descriptions are limited to 1,000 - // characters in length. + // The pull request description is not valid. Descriptions cannot be more than + // 1,000 characters. 
ErrCodeInvalidDescriptionException = "InvalidDescriptionException" // ErrCodeInvalidDestinationCommitSpecifierException for service response error code @@ -398,8 +525,8 @@ const ( // ErrCodeInvalidFileLocationException for service response error code // "InvalidFileLocationException". // - // The location of the file is not valid. Make sure that you include the extension - // of the file as well as the file name. + // The location of the file is not valid. Make sure that you include the file + // name and extension. ErrCodeInvalidFileLocationException = "InvalidFileLocationException" // ErrCodeInvalidFileModeException for service response error code @@ -447,6 +574,12 @@ const ( // The specified sort order is not valid. ErrCodeInvalidOrderException = "InvalidOrderException" + // ErrCodeInvalidOverrideStatusException for service response error code + // "InvalidOverrideStatusException". + // + // The override status is not valid. Valid statuses are OVERRIDE and REVOKE. + ErrCodeInvalidOverrideStatusException = "InvalidOverrideStatusException" + // ErrCodeInvalidParentCommitIdException for service response error code // "InvalidParentCommitIdException". // @@ -493,7 +626,7 @@ const ( // "InvalidReferenceNameException". // // The specified reference name format is not valid. Reference names must conform - // to the Git references format, for example refs/heads/master. For more information, + // to the Git references format (for example, refs/heads/master). For more information, // see Git Internals - Git References (https://git-scm.com/book/en/v2/Git-Internals-Git-References) // or consult your Git documentation. ErrCodeInvalidReferenceNameException = "InvalidReferenceNameException" @@ -528,9 +661,9 @@ const ( // ErrCodeInvalidRepositoryNameException for service response error code // "InvalidRepositoryNameException". // - // At least one specified repository name is not valid. + // A specified repository name is not valid. 
// - // This exception only occurs when a specified repository name is not valid. + // This exception occurs only when a specified repository name is not valid. // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. ErrCodeInvalidRepositoryNameException = "InvalidRepositoryNameException" @@ -571,8 +704,9 @@ const ( // ErrCodeInvalidRepositoryTriggerRegionException for service response error code // "InvalidRepositoryTriggerRegionException". // - // The region for the trigger target does not match the region for the repository. - // Triggers must be created in the same region as the target for the trigger. + // The AWS Region for the trigger target does not match the AWS Region for the + // repository. Triggers must be created in the same Region as the target for + // the trigger. ErrCodeInvalidRepositoryTriggerRegionException = "InvalidRepositoryTriggerRegionException" // ErrCodeInvalidResourceArnException for service response error code @@ -583,6 +717,18 @@ const ( // in the AWS CodeCommit User Guide. ErrCodeInvalidResourceArnException = "InvalidResourceArnException" + // ErrCodeInvalidRevisionIdException for service response error code + // "InvalidRevisionIdException". + // + // The revision ID is not valid. Use GetPullRequest to determine the value. + ErrCodeInvalidRevisionIdException = "InvalidRevisionIdException" + + // ErrCodeInvalidRuleContentSha256Exception for service response error code + // "InvalidRuleContentSha256Exception". + // + // The SHA-256 hash signature for the rule content is not valid. + ErrCodeInvalidRuleContentSha256Exception = "InvalidRuleContentSha256Exception" + // ErrCodeInvalidSortByException for service response error code // "InvalidSortByException". // @@ -680,10 +826,17 @@ const ( // ErrCodeMaximumItemsToCompareExceededException for service response error code // "MaximumItemsToCompareExceededException". 
// - // The maximum number of items to compare between the source or destination - // branches and the merge base has exceeded the maximum allowed. + // The number of items to compare between the source or destination branches + // and the merge base has exceeded the maximum allowed. ErrCodeMaximumItemsToCompareExceededException = "MaximumItemsToCompareExceededException" + // ErrCodeMaximumNumberOfApprovalsExceededException for service response error code + // "MaximumNumberOfApprovalsExceededException". + // + // The number of approvals required for the approval rule exceeds the maximum + // number allowed. + ErrCodeMaximumNumberOfApprovalsExceededException = "MaximumNumberOfApprovalsExceededException" + // ErrCodeMaximumOpenPullRequestsExceededException for service response error code // "MaximumOpenPullRequestsExceededException". // @@ -696,7 +849,7 @@ const ( // "MaximumRepositoryNamesExceededException". // // The maximum number of allowed repository names was exceeded. Currently, this - // number is 25. + // number is 100. ErrCodeMaximumRepositoryNamesExceededException = "MaximumRepositoryNamesExceededException" // ErrCodeMaximumRepositoryTriggersExceededException for service response error code @@ -705,6 +858,13 @@ const ( // The number of triggers allowed for the repository was exceeded. ErrCodeMaximumRepositoryTriggersExceededException = "MaximumRepositoryTriggersExceededException" + // ErrCodeMaximumRuleTemplatesAssociatedWithRepositoryException for service response error code + // "MaximumRuleTemplatesAssociatedWithRepositoryException". + // + // The maximum number of approval rule templates for a repository has been exceeded. + // You cannot associate more than 25 approval rule templates with a repository. + ErrCodeMaximumRuleTemplatesAssociatedWithRepositoryException = "MaximumRuleTemplatesAssociatedWithRepositoryException" + // ErrCodeMergeOptionRequiredException for service response error code // "MergeOptionRequiredException". 
// @@ -740,6 +900,33 @@ const ( // as a result of this commit. A commit must contain at least one change. ErrCodeNoChangeException = "NoChangeException" + // ErrCodeNumberOfRuleTemplatesExceededException for service response error code + // "NumberOfRuleTemplatesExceededException". + // + // The maximum number of approval rule templates has been exceeded for this + // AWS Region. + ErrCodeNumberOfRuleTemplatesExceededException = "NumberOfRuleTemplatesExceededException" + + // ErrCodeNumberOfRulesExceededException for service response error code + // "NumberOfRulesExceededException". + // + // The approval rule cannot be added. The pull request has the maximum number + // of approval rules associated with it. + ErrCodeNumberOfRulesExceededException = "NumberOfRulesExceededException" + + // ErrCodeOverrideAlreadySetException for service response error code + // "OverrideAlreadySetException". + // + // The pull request has already had its approval rules set to override. + ErrCodeOverrideAlreadySetException = "OverrideAlreadySetException" + + // ErrCodeOverrideStatusRequiredException for service response error code + // "OverrideStatusRequiredException". + // + // An override status is required, but no value was provided. Valid values include + // OVERRIDE and REVOKE. + ErrCodeOverrideStatusRequiredException = "OverrideStatusRequiredException" + // ErrCodeParentCommitDoesNotExistException for service response error code // "ParentCommitDoesNotExistException". // @@ -781,6 +968,21 @@ const ( // The pull request status cannot be updated because it is already closed. ErrCodePullRequestAlreadyClosedException = "PullRequestAlreadyClosedException" + // ErrCodePullRequestApprovalRulesNotSatisfiedException for service response error code + // "PullRequestApprovalRulesNotSatisfiedException". + // + // The pull request cannot be merged because one or more approval rules applied + // to the pull request have conditions that have not been met. 
+ ErrCodePullRequestApprovalRulesNotSatisfiedException = "PullRequestApprovalRulesNotSatisfiedException" + + // ErrCodePullRequestCannotBeApprovedByAuthorException for service response error code + // "PullRequestCannotBeApprovedByAuthorException". + // + // The approval cannot be applied because the user approving the pull request + // matches the user who created the pull request. You cannot approve a pull + // request that you created. + ErrCodePullRequestCannotBeApprovedByAuthorException = "PullRequestCannotBeApprovedByAuthorException" + // ErrCodePullRequestDoesNotExistException for service response error code // "PullRequestDoesNotExistException". // @@ -828,7 +1030,7 @@ const ( // ErrCodeReplacementContentRequiredException for service response error code // "ReplacementContentRequiredException". // - // USE_NEW_CONTENT was specified but no replacement content has been provided. + // USE_NEW_CONTENT was specified, but no replacement content has been provided. ErrCodeReplacementContentRequiredException = "ReplacementContentRequiredException" // ErrCodeReplacementTypeRequiredException for service response error code @@ -858,13 +1060,13 @@ const ( // ErrCodeRepositoryNameRequiredException for service response error code // "RepositoryNameRequiredException". // - // A repository name is required but was not specified. + // A repository name is required, but was not specified. ErrCodeRepositoryNameRequiredException = "RepositoryNameRequiredException" // ErrCodeRepositoryNamesRequiredException for service response error code // "RepositoryNamesRequiredException". // - // A repository names object is required but was not specified. + // At least one repository name object is required, but was not specified. 
ErrCodeRepositoryNamesRequiredException = "RepositoryNamesRequiredException" // ErrCodeRepositoryNotAssociatedWithPullRequestException for service response error code @@ -878,43 +1080,43 @@ const ( // ErrCodeRepositoryTriggerBranchNameListRequiredException for service response error code // "RepositoryTriggerBranchNameListRequiredException". // - // At least one branch name is required but was not specified in the trigger + // At least one branch name is required, but was not specified in the trigger // configuration. ErrCodeRepositoryTriggerBranchNameListRequiredException = "RepositoryTriggerBranchNameListRequiredException" // ErrCodeRepositoryTriggerDestinationArnRequiredException for service response error code // "RepositoryTriggerDestinationArnRequiredException". // - // A destination ARN for the target service for the trigger is required but + // A destination ARN for the target service for the trigger is required, but // was not specified. ErrCodeRepositoryTriggerDestinationArnRequiredException = "RepositoryTriggerDestinationArnRequiredException" // ErrCodeRepositoryTriggerEventsListRequiredException for service response error code // "RepositoryTriggerEventsListRequiredException". // - // At least one event for the trigger is required but was not specified. + // At least one event for the trigger is required, but was not specified. ErrCodeRepositoryTriggerEventsListRequiredException = "RepositoryTriggerEventsListRequiredException" // ErrCodeRepositoryTriggerNameRequiredException for service response error code // "RepositoryTriggerNameRequiredException". // - // A name for the trigger is required but was not specified. + // A name for the trigger is required, but was not specified. ErrCodeRepositoryTriggerNameRequiredException = "RepositoryTriggerNameRequiredException" // ErrCodeRepositoryTriggersListRequiredException for service response error code // "RepositoryTriggersListRequiredException". 
// - // The list of triggers for the repository is required but was not specified. + // The list of triggers for the repository is required, but was not specified. ErrCodeRepositoryTriggersListRequiredException = "RepositoryTriggersListRequiredException" // ErrCodeRequestTokenRequiredException for service response error code // "RequestTokenRequiredException". // // A client request token is required. A client request token is an unique, - // client-generated idempotency token that when provided in a request, ensures + // client-generated idempotency token that, when provided in a request, ensures // the request cannot be repeated with a changed parameter. If a request is - // received with the same parameters and a token is included, the request will - // return information about the initial request that used that token. + // received with the same parameters and a token is included, the request returns + // information about the initial request that used that token. ErrCodeRequestTokenRequiredException = "RequestTokenRequiredException" // ErrCodeResourceArnRequiredException for service response error code @@ -933,6 +1135,19 @@ const ( // or moving a .gitkeep file. ErrCodeRestrictedSourceFileException = "RestrictedSourceFileException" + // ErrCodeRevisionIdRequiredException for service response error code + // "RevisionIdRequiredException". + // + // A revision ID is required, but was not provided. + ErrCodeRevisionIdRequiredException = "RevisionIdRequiredException" + + // ErrCodeRevisionNotCurrentException for service response error code + // "RevisionNotCurrentException". + // + // The revision ID provided in the request does not match the current revision + // ID. Use GetPullRequest to retrieve the current revision ID. + ErrCodeRevisionNotCurrentException = "RevisionNotCurrentException" + // ErrCodeSameFileContentException for service response error code // "SameFileContentException". 
// @@ -953,8 +1168,8 @@ const ( // ErrCodeSourceAndDestinationAreSameException for service response error code // "SourceAndDestinationAreSameException". // - // The source branch and the destination branch for the pull request are the - // same. You must specify different branches for the source and destination. + // The source branch and destination branch for the pull request are the same. + // You must specify different branches for the source and destination. ErrCodeSourceAndDestinationAreSameException = "SourceAndDestinationAreSameException" // ErrCodeSourceFileOrContentRequiredException for service response error code diff --git a/service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go b/service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go new file mode 100644 index 00000000000..ba138cd6bfb --- /dev/null +++ b/service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go @@ -0,0 +1,140 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type AssociateApprovalRuleTemplateWithRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name for the approval rule template. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The name of the repository that you want to associate with the template. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateApprovalRuleTemplateWithRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateApprovalRuleTemplateWithRepositoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AssociateApprovalRuleTemplateWithRepositoryInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryName == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssociateApprovalRuleTemplateWithRepositoryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateApprovalRuleTemplateWithRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opAssociateApprovalRuleTemplateWithRepository = "AssociateApprovalRuleTemplateWithRepository" + +// AssociateApprovalRuleTemplateWithRepositoryRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates an association between an approval rule template and a specified +// repository. 
Then, the next time a pull request is created in the repository +// where the destination reference (if specified) matches the destination reference +// (branch) for the pull request, an approval rule that matches the template +// conditions is automatically created for that pull request. If no destination +// references are specified in the template, an approval rule that matches the +// template contents is created for all pull requests in that repository. +// +// // Example sending a request using AssociateApprovalRuleTemplateWithRepositoryRequest. +// req := client.AssociateApprovalRuleTemplateWithRepositoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/AssociateApprovalRuleTemplateWithRepository +func (c *Client) AssociateApprovalRuleTemplateWithRepositoryRequest(input *AssociateApprovalRuleTemplateWithRepositoryInput) AssociateApprovalRuleTemplateWithRepositoryRequest { + op := &aws.Operation{ + Name: opAssociateApprovalRuleTemplateWithRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateApprovalRuleTemplateWithRepositoryInput{} + } + + req := c.newRequest(op, input, &AssociateApprovalRuleTemplateWithRepositoryOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AssociateApprovalRuleTemplateWithRepositoryRequest{Request: req, Input: input, Copy: c.AssociateApprovalRuleTemplateWithRepositoryRequest} +} + +// AssociateApprovalRuleTemplateWithRepositoryRequest is the request type for the +// AssociateApprovalRuleTemplateWithRepository API operation. 
+type AssociateApprovalRuleTemplateWithRepositoryRequest struct { + *aws.Request + Input *AssociateApprovalRuleTemplateWithRepositoryInput + Copy func(*AssociateApprovalRuleTemplateWithRepositoryInput) AssociateApprovalRuleTemplateWithRepositoryRequest +} + +// Send marshals and sends the AssociateApprovalRuleTemplateWithRepository API request. +func (r AssociateApprovalRuleTemplateWithRepositoryRequest) Send(ctx context.Context) (*AssociateApprovalRuleTemplateWithRepositoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AssociateApprovalRuleTemplateWithRepositoryResponse{ + AssociateApprovalRuleTemplateWithRepositoryOutput: r.Request.Data.(*AssociateApprovalRuleTemplateWithRepositoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AssociateApprovalRuleTemplateWithRepositoryResponse is the response type for the +// AssociateApprovalRuleTemplateWithRepository API operation. +type AssociateApprovalRuleTemplateWithRepositoryResponse struct { + *AssociateApprovalRuleTemplateWithRepositoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AssociateApprovalRuleTemplateWithRepository request. +func (r *AssociateApprovalRuleTemplateWithRepositoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go b/service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go new file mode 100644 index 00000000000..03500daab96 --- /dev/null +++ b/service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type BatchAssociateApprovalRuleTemplateWithRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The name of the template you want to associate with one or more repositories. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The names of the repositories you want to associate with the template. + // + // The length constraint limit is for each string in the array. The array itself + // can be empty. + // + // RepositoryNames is a required field + RepositoryNames []string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchAssociateApprovalRuleTemplateWithRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchAssociateApprovalRuleTemplateWithRepositoriesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchAssociateApprovalRuleTemplateWithRepositoriesInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryNames == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchAssociateApprovalRuleTemplateWithRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of names of the repositories that have been associated with the template. 
+ // + // AssociatedRepositoryNames is a required field + AssociatedRepositoryNames []string `locationName:"associatedRepositoryNames" type:"list" required:"true"` + + // A list of any errors that might have occurred while attempting to create + // the association between the template and the repositories. + // + // Errors is a required field + Errors []BatchAssociateApprovalRuleTemplateWithRepositoriesError `locationName:"errors" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchAssociateApprovalRuleTemplateWithRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +const opBatchAssociateApprovalRuleTemplateWithRepositories = "BatchAssociateApprovalRuleTemplateWithRepositories" + +// BatchAssociateApprovalRuleTemplateWithRepositoriesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates an association between an approval rule template and one or more +// specified repositories. +// +// // Example sending a request using BatchAssociateApprovalRuleTemplateWithRepositoriesRequest. 
+// req := client.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchAssociateApprovalRuleTemplateWithRepositories +func (c *Client) BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(input *BatchAssociateApprovalRuleTemplateWithRepositoriesInput) BatchAssociateApprovalRuleTemplateWithRepositoriesRequest { + op := &aws.Operation{ + Name: opBatchAssociateApprovalRuleTemplateWithRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchAssociateApprovalRuleTemplateWithRepositoriesInput{} + } + + req := c.newRequest(op, input, &BatchAssociateApprovalRuleTemplateWithRepositoriesOutput{}) + return BatchAssociateApprovalRuleTemplateWithRepositoriesRequest{Request: req, Input: input, Copy: c.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest} +} + +// BatchAssociateApprovalRuleTemplateWithRepositoriesRequest is the request type for the +// BatchAssociateApprovalRuleTemplateWithRepositories API operation. +type BatchAssociateApprovalRuleTemplateWithRepositoriesRequest struct { + *aws.Request + Input *BatchAssociateApprovalRuleTemplateWithRepositoriesInput + Copy func(*BatchAssociateApprovalRuleTemplateWithRepositoriesInput) BatchAssociateApprovalRuleTemplateWithRepositoriesRequest +} + +// Send marshals and sends the BatchAssociateApprovalRuleTemplateWithRepositories API request. 
+func (r BatchAssociateApprovalRuleTemplateWithRepositoriesRequest) Send(ctx context.Context) (*BatchAssociateApprovalRuleTemplateWithRepositoriesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchAssociateApprovalRuleTemplateWithRepositoriesResponse{ + BatchAssociateApprovalRuleTemplateWithRepositoriesOutput: r.Request.Data.(*BatchAssociateApprovalRuleTemplateWithRepositoriesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchAssociateApprovalRuleTemplateWithRepositoriesResponse is the response type for the +// BatchAssociateApprovalRuleTemplateWithRepositories API operation. +type BatchAssociateApprovalRuleTemplateWithRepositoriesResponse struct { + *BatchAssociateApprovalRuleTemplateWithRepositoriesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchAssociateApprovalRuleTemplateWithRepositories request. +func (r *BatchAssociateApprovalRuleTemplateWithRepositoriesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_BatchDescribeMergeConflicts.go b/service/codecommit/api_op_BatchDescribeMergeConflicts.go index 38f5c5b1564..54db1a5fed2 100644 --- a/service/codecommit/api_op_BatchDescribeMergeConflicts.go +++ b/service/codecommit/api_op_BatchDescribeMergeConflicts.go @@ -13,20 +13,19 @@ type BatchDescribeMergeConflictsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. 
If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -46,7 +45,7 @@ type BatchDescribeMergeConflictsInput struct { // MergeOption is a required field MergeOption MergeOptionTypeEnum `locationName:"mergeOption" type:"string" required:"true" enum:"true"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` @@ -57,7 +56,7 @@ type BatchDescribeMergeConflictsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). 
// // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` diff --git a/service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go b/service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go new file mode 100644 index 00000000000..eb4d5a10b24 --- /dev/null +++ b/service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type BatchDisassociateApprovalRuleTemplateFromRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The name of the template that you want to disassociate from one or more repositories. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The repository names that you want to disassociate from the approval rule + // template. + // + // The length constraint limit is for each string in the array. The array itself + // can be empty. + // + // RepositoryNames is a required field + RepositoryNames []string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryNames == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of repository names that have had their association with the template + // removed. + // + // DisassociatedRepositoryNames is a required field + DisassociatedRepositoryNames []string `locationName:"disassociatedRepositoryNames" type:"list" required:"true"` + + // A list of any errors that might have occurred while attempting to remove + // the association between the template and the repositories. + // + // Errors is a required field + Errors []BatchDisassociateApprovalRuleTemplateFromRepositoriesError `locationName:"errors" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +const opBatchDisassociateApprovalRuleTemplateFromRepositories = "BatchDisassociateApprovalRuleTemplateFromRepositories" + +// BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Removes the association between an approval rule template and one or more +// specified repositories. +// +// // Example sending a request using BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest. 
+// req := client.BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchDisassociateApprovalRuleTemplateFromRepositories +func (c *Client) BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest(input *BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest { + op := &aws.Operation{ + Name: opBatchDisassociateApprovalRuleTemplateFromRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDisassociateApprovalRuleTemplateFromRepositoriesInput{} + } + + req := c.newRequest(op, input, &BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput{}) + return BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest{Request: req, Input: input, Copy: c.BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest} +} + +// BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest is the request type for the +// BatchDisassociateApprovalRuleTemplateFromRepositories API operation. +type BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest struct { + *aws.Request + Input *BatchDisassociateApprovalRuleTemplateFromRepositoriesInput + Copy func(*BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest +} + +// Send marshals and sends the BatchDisassociateApprovalRuleTemplateFromRepositories API request. 
+func (r BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest) Send(ctx context.Context) (*BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse{ + BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput: r.Request.Data.(*BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse is the response type for the +// BatchDisassociateApprovalRuleTemplateFromRepositories API operation. +type BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse struct { + *BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchDisassociateApprovalRuleTemplateFromRepositories request. +func (r *BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_BatchGetCommits.go b/service/codecommit/api_op_BatchGetCommits.go index 3180c7ff3cd..30ca2f29ffc 100644 --- a/service/codecommit/api_op_BatchGetCommits.go +++ b/service/codecommit/api_op_BatchGetCommits.go @@ -14,7 +14,8 @@ type BatchGetCommitsInput struct { // The full commit IDs of the commits to get information about. // - // You must supply the full SHAs of each commit. You cannot use shortened SHAs. + // You must supply the full SHA IDs of each commit. You cannot use shortened + // SHA IDs. // // CommitIds is a required field CommitIds []string `locationName:"commitIds" type:"list" required:"true"` @@ -59,9 +60,8 @@ type BatchGetCommitsOutput struct { Commits []Commit `locationName:"commits" type:"list"` // Returns any commit IDs for which information could not be found. 
For example, - // if one of the commit IDs was a shortened SHA or that commit was not found - // in the specified repository, the ID will return an error object with additional - // information. + // if one of the commit IDs was a shortened SHA ID or that commit was not found + // in the specified repository, the ID returns an error object with more information. Errors []BatchGetCommitsError `locationName:"errors" type:"list"` } diff --git a/service/codecommit/api_op_BatchGetRepositories.go b/service/codecommit/api_op_BatchGetRepositories.go index eb5f9b2251c..660270e0cde 100644 --- a/service/codecommit/api_op_BatchGetRepositories.go +++ b/service/codecommit/api_op_BatchGetRepositories.go @@ -15,6 +15,9 @@ type BatchGetRepositoriesInput struct { // The names of the repositories to get information about. // + // The length constraint limit is for each string in the array. The array itself + // can be empty. + // // RepositoryNames is a required field RepositoryNames []string `locationName:"repositoryNames" type:"list" required:"true"` } @@ -63,9 +66,9 @@ const opBatchGetRepositories = "BatchGetRepositories" // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// and display it in a webpage can expose users to potentially malicious code. +// Make sure that you HTML-encode the description field in any application that +// uses this API to display the repository description on a webpage. // // // Example sending a request using BatchGetRepositoriesRequest. 
// req := client.BatchGetRepositoriesRequest(params) diff --git a/service/codecommit/api_op_CreateApprovalRuleTemplate.go b/service/codecommit/api_op_CreateApprovalRuleTemplate.go new file mode 100644 index 00000000000..000d444fc67 --- /dev/null +++ b/service/codecommit/api_op_CreateApprovalRuleTemplate.go @@ -0,0 +1,171 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreateApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The content of the approval rule that is created on pull requests in associated + // repositories. If you specify one or more destination references (branches), + // approval rules are created in an associated repository only if their destination + // references (branches) match those specified in the template. + // + // When you create the content of the approval rule template, you can specify + // approvers in an approval pool in one of two ways: + // + // * CodeCommitApprovers: This option only requires an AWS account and a + // resource. It can be used for both IAM users and federated access users + // whose name matches the provided resource name. This is a very powerful + // option that offers a great deal of flexibility. For example, if you specify + // the AWS account 123456789012 and Mary_Major, all of the following are + // counted as approvals coming from that user: An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified + // in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + // This option does not recognize an active session of someone assuming the + // role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) + // unless you include a wildcard (*Mary_Major). 
+ // + // * Fully qualified ARN: This option allows you to specify the fully qualified + // Amazon Resource Name (ARN) of the IAM user or role. + // + // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers + // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // ApprovalRuleTemplateContent is a required field + ApprovalRuleTemplateContent *string `locationName:"approvalRuleTemplateContent" min:"1" type:"string" required:"true"` + + // The description of the approval rule template. Consider providing a description + // that explains what this template does and when it might be appropriate to + // associate it with repositories. + ApprovalRuleTemplateDescription *string `locationName:"approvalRuleTemplateDescription" type:"string"` + + // The name of the approval rule template. Provide descriptive names, because + // this name is applied to the approval rules created automatically in associated + // repositories. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateContent == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateContent")) + } + if s.ApprovalRuleTemplateContent != nil && len(*s.ApprovalRuleTemplateContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateContent", 1)) + } + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // The content and structure of the created approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateApprovalRuleTemplate = "CreateApprovalRuleTemplate" + +// CreateApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates a template for approval rules that can then be associated with one +// or more repositories in your AWS account. When you associate a template with +// a repository, AWS CodeCommit creates an approval rule that matches the conditions +// of the template for all pull requests that meet the conditions of the template. +// For more information, see AssociateApprovalRuleTemplateWithRepository. +// +// // Example sending a request using CreateApprovalRuleTemplateRequest. 
+// req := client.CreateApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreateApprovalRuleTemplate +func (c *Client) CreateApprovalRuleTemplateRequest(input *CreateApprovalRuleTemplateInput) CreateApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opCreateApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &CreateApprovalRuleTemplateOutput{}) + return CreateApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.CreateApprovalRuleTemplateRequest} +} + +// CreateApprovalRuleTemplateRequest is the request type for the +// CreateApprovalRuleTemplate API operation. +type CreateApprovalRuleTemplateRequest struct { + *aws.Request + Input *CreateApprovalRuleTemplateInput + Copy func(*CreateApprovalRuleTemplateInput) CreateApprovalRuleTemplateRequest +} + +// Send marshals and sends the CreateApprovalRuleTemplate API request. +func (r CreateApprovalRuleTemplateRequest) Send(ctx context.Context) (*CreateApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateApprovalRuleTemplateResponse{ + CreateApprovalRuleTemplateOutput: r.Request.Data.(*CreateApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateApprovalRuleTemplateResponse is the response type for the +// CreateApprovalRuleTemplate API operation. +type CreateApprovalRuleTemplateResponse struct { + *CreateApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateApprovalRuleTemplate request. 
+func (r *CreateApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_CreateBranch.go b/service/codecommit/api_op_CreateBranch.go index 4ef9c66a760..f147bbb9a46 100644 --- a/service/codecommit/api_op_CreateBranch.go +++ b/service/codecommit/api_op_CreateBranch.go @@ -78,7 +78,7 @@ const opCreateBranch = "CreateBranch" // CreateBranchRequest returns a request value for making API operation for // AWS CodeCommit. // -// Creates a new branch in a repository and points the branch to a commit. +// Creates a branch in a repository and points the branch to a commit. // // Calling the create branch operation does not set a repository's default branch. // To do this, call the update default branch operation. diff --git a/service/codecommit/api_op_CreateCommit.go b/service/codecommit/api_op_CreateCommit.go index b8ea6f004d5..26f90be33bb 100644 --- a/service/codecommit/api_op_CreateCommit.go +++ b/service/codecommit/api_op_CreateCommit.go @@ -13,40 +13,38 @@ import ( type CreateCommitInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` - // The name of the branch where you will create the commit. + // The name of the branch where you create the commit. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // The commit message you want to include as part of creating the commit. Commit - // messages are limited to 256 KB. If no message is specified, a default message - // will be used. + // The commit message you want to include in the commit. Commit messages are + // limited to 256 KB. 
If no message is specified, a default message is used. CommitMessage *string `locationName:"commitMessage" type:"string"` - // The files to delete in this commit. These files will still exist in prior - // commits. + // The files to delete in this commit. These files still exist in earlier commits. DeleteFiles []DeleteFileEntry `locationName:"deleteFiles" type:"list"` // The email address of the person who created the commit. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a ..gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` - // The ID of the commit that is the parent of the commit you will create. If - // this is an empty repository, this is not required. + // The ID of the commit that is the parent of the commit you create. Not required + // if this is an empty repository. ParentCommitId *string `locationName:"parentCommitId" type:"string"` // The files to add or update in this commit. PutFiles []PutFileEntry `locationName:"putFiles" type:"list"` - // The name of the repository where you will create the commit. + // The name of the repository where you create the commit. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` diff --git a/service/codecommit/api_op_CreatePullRequest.go b/service/codecommit/api_op_CreatePullRequest.go index ee9a52d7e43..eb1345f6c4b 100644 --- a/service/codecommit/api_op_CreatePullRequest.go +++ b/service/codecommit/api_op_CreatePullRequest.go @@ -13,28 +13,28 @@ import ( type CreatePullRequestInput struct { _ struct{} `type:"structure"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. // - // The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you - // do not have to generate an idempotency token, as this will be done for you. + // The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, + // an idempotency token is created for you. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // A description of the pull request. Description *string `locationName:"description" type:"string"` // The targets for the pull request, including the source of the code to be - // reviewed (the source branch), and the destination where the creator of the + // reviewed (the source branch) and the destination where the creator of the // pull request intends the code to be merged after the pull request is closed // (the destination branch). // // Targets is a required field Targets []Target `locationName:"targets" type:"list" required:"true"` - // The title of the pull request. 
This title will be used to identify the pull - // request to other users in the repository. + // The title of the pull request. This title is used to identify the pull request + // to other users in the repository. // // Title is a required field Title *string `locationName:"title" type:"string" required:"true"` diff --git a/service/codecommit/api_op_CreatePullRequestApprovalRule.go b/service/codecommit/api_op_CreatePullRequestApprovalRule.go new file mode 100644 index 00000000000..470efa2df16 --- /dev/null +++ b/service/codecommit/api_op_CreatePullRequestApprovalRule.go @@ -0,0 +1,169 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreatePullRequestApprovalRuleInput struct { + _ struct{} `type:"structure"` + + // The content of the approval rule, including the number of approvals needed + // and the structure of an approval pool defined for approvals, if any. For + // more information about approval pools, see the AWS CodeCommit User Guide. + // + // When you create the content of the approval rule, you can specify approvers + // in an approval pool in one of two ways: + // + // * CodeCommitApprovers: This option only requires an AWS account and a + // resource. It can be used for both IAM users and federated access users + // whose name matches the provided resource name. This is a very powerful + // option that offers a great deal of flexibility. 
For example, if you specify + // the AWS account 123456789012 and Mary_Major, all of the following would + // be counted as approvals coming from that user: An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified + // in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + // This option does not recognize an active session of someone assuming the + // role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) + // unless you include a wildcard (*Mary_Major). + // + // * Fully qualified ARN: This option allows you to specify the fully qualified + // Amazon Resource Name (ARN) of the IAM user or role. + // + // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers + // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // ApprovalRuleContent is a required field + ApprovalRuleContent *string `locationName:"approvalRuleContent" min:"1" type:"string" required:"true"` + + // The name for the approval rule. + // + // ApprovalRuleName is a required field + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string" required:"true"` + + // The system-generated ID of the pull request for which you want to create + // the approval rule. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePullRequestApprovalRuleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreatePullRequestApprovalRuleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreatePullRequestApprovalRuleInput"} + + if s.ApprovalRuleContent == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleContent")) + } + if s.ApprovalRuleContent != nil && len(*s.ApprovalRuleContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleContent", 1)) + } + + if s.ApprovalRuleName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleName")) + } + if s.ApprovalRuleName != nil && len(*s.ApprovalRuleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleName", 1)) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreatePullRequestApprovalRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the created approval rule. + // + // ApprovalRule is a required field + ApprovalRule *ApprovalRule `locationName:"approvalRule" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePullRequestApprovalRuleOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreatePullRequestApprovalRule = "CreatePullRequestApprovalRule" + +// CreatePullRequestApprovalRuleRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates an approval rule for a pull request. +// +// // Example sending a request using CreatePullRequestApprovalRuleRequest. 
+// req := client.CreatePullRequestApprovalRuleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreatePullRequestApprovalRule +func (c *Client) CreatePullRequestApprovalRuleRequest(input *CreatePullRequestApprovalRuleInput) CreatePullRequestApprovalRuleRequest { + op := &aws.Operation{ + Name: opCreatePullRequestApprovalRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePullRequestApprovalRuleInput{} + } + + req := c.newRequest(op, input, &CreatePullRequestApprovalRuleOutput{}) + return CreatePullRequestApprovalRuleRequest{Request: req, Input: input, Copy: c.CreatePullRequestApprovalRuleRequest} +} + +// CreatePullRequestApprovalRuleRequest is the request type for the +// CreatePullRequestApprovalRule API operation. +type CreatePullRequestApprovalRuleRequest struct { + *aws.Request + Input *CreatePullRequestApprovalRuleInput + Copy func(*CreatePullRequestApprovalRuleInput) CreatePullRequestApprovalRuleRequest +} + +// Send marshals and sends the CreatePullRequestApprovalRule API request. +func (r CreatePullRequestApprovalRuleRequest) Send(ctx context.Context) (*CreatePullRequestApprovalRuleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreatePullRequestApprovalRuleResponse{ + CreatePullRequestApprovalRuleOutput: r.Request.Data.(*CreatePullRequestApprovalRuleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreatePullRequestApprovalRuleResponse is the response type for the +// CreatePullRequestApprovalRule API operation. +type CreatePullRequestApprovalRuleResponse struct { + *CreatePullRequestApprovalRuleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreatePullRequestApprovalRule request. 
+func (r *CreatePullRequestApprovalRuleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_CreateRepository.go b/service/codecommit/api_op_CreateRepository.go index f41b64c0d1d..4cfa5743e79 100644 --- a/service/codecommit/api_op_CreateRepository.go +++ b/service/codecommit/api_op_CreateRepository.go @@ -17,18 +17,18 @@ type CreateRepositoryInput struct { // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description - // and display it in a web page could expose users to potentially malicious - // code. Make sure that you HTML-encode the description field in any application - // that uses this API to display the repository description on a web page. + // and display it in a webpage can expose users to potentially malicious code. + // Make sure that you HTML-encode the description field in any application that + // uses this API to display the repository description on a webpage. RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` // The name of the new repository to be created. // - // The repository name must be unique across the calling AWS account. In addition, - // repository names are limited to 100 alphanumeric, dash, and underscore characters, - // and cannot include certain characters. For a full description of the limits + // The repository name must be unique across the calling AWS account. Repository + // names are limited to 100 alphanumeric, dash, and underscore characters, and + // cannot include certain characters. For more information about the limits // on repository names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) - // in the AWS CodeCommit User Guide. The suffix ".git" is prohibited. + // in the AWS CodeCommit User Guide. The suffix .git is prohibited. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` diff --git a/service/codecommit/api_op_CreateUnreferencedMergeCommit.go b/service/codecommit/api_op_CreateUnreferencedMergeCommit.go index 585a3fe3a86..1965ec7690a 100644 --- a/service/codecommit/api_op_CreateUnreferencedMergeCommit.go +++ b/service/codecommit/api_op_CreateUnreferencedMergeCommit.go @@ -13,31 +13,30 @@ type CreateUnreferencedMergeCommitInput struct { _ struct{} `type:"structure"` // The name of the author who created the unreferenced commit. This information - // will be used as both the author and committer for the commit. + // is used as both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message for the unreferenced commit. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. 
ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -47,7 +46,7 @@ type CreateUnreferencedMergeCommitInput struct { // If the commit contains deletions, whether to keep a folder or folder structure // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // file is created for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The merge option or strategy you want to use to merge the code. @@ -62,7 +61,7 @@ type CreateUnreferencedMergeCommitInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). 
// // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -130,7 +129,7 @@ const opCreateUnreferencedMergeCommit = "CreateUnreferencedMergeCommit" // Creates an unreferenced commit that represents the result of merging two // branches using a specified merge strategy. This can help you determine the // outcome of a potential merge. This API cannot be used with the fast-forward -// merge strategy, as that strategy does not create a merge commit. +// merge strategy because that strategy does not create a merge commit. // // This unreferenced merge commit can only be accessed using the GetCommit API // or through git commands such as git fetch. To retrieve this commit, you must diff --git a/service/codecommit/api_op_DeleteApprovalRuleTemplate.go b/service/codecommit/api_op_DeleteApprovalRuleTemplate.go new file mode 100644 index 00000000000..2b8de16da01 --- /dev/null +++ b/service/codecommit/api_op_DeleteApprovalRuleTemplate.go @@ -0,0 +1,125 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeleteApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template to delete. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // The system-generated ID of the deleted approval rule template. If the template + // has been previously deleted, the only response is a 200 OK. + // + // ApprovalRuleTemplateId is a required field + ApprovalRuleTemplateId *string `locationName:"approvalRuleTemplateId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteApprovalRuleTemplate = "DeleteApprovalRuleTemplate" + +// DeleteApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Deletes a specified approval rule template. Deleting a template does not +// remove approval rules on pull requests already created with the template. +// +// // Example sending a request using DeleteApprovalRuleTemplateRequest. 
+// req := client.DeleteApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteApprovalRuleTemplate +func (c *Client) DeleteApprovalRuleTemplateRequest(input *DeleteApprovalRuleTemplateInput) DeleteApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opDeleteApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &DeleteApprovalRuleTemplateOutput{}) + return DeleteApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.DeleteApprovalRuleTemplateRequest} +} + +// DeleteApprovalRuleTemplateRequest is the request type for the +// DeleteApprovalRuleTemplate API operation. +type DeleteApprovalRuleTemplateRequest struct { + *aws.Request + Input *DeleteApprovalRuleTemplateInput + Copy func(*DeleteApprovalRuleTemplateInput) DeleteApprovalRuleTemplateRequest +} + +// Send marshals and sends the DeleteApprovalRuleTemplate API request. +func (r DeleteApprovalRuleTemplateRequest) Send(ctx context.Context) (*DeleteApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteApprovalRuleTemplateResponse{ + DeleteApprovalRuleTemplateOutput: r.Request.Data.(*DeleteApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteApprovalRuleTemplateResponse is the response type for the +// DeleteApprovalRuleTemplate API operation. +type DeleteApprovalRuleTemplateResponse struct { + *DeleteApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteApprovalRuleTemplate request. 
+func (r *DeleteApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_DeleteFile.go b/service/codecommit/api_op_DeleteFile.go index 03ad3c44055..1b73170435e 100644 --- a/service/codecommit/api_op_DeleteFile.go +++ b/service/codecommit/api_op_DeleteFile.go @@ -12,43 +12,42 @@ import ( type DeleteFileInput struct { _ struct{} `type:"structure"` - // The name of the branch where the commit will be made deleting the file. + // The name of the branch where the commit that deletes the file is made. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` // The commit message you want to include as part of deleting the file. Commit // messages are limited to 256 KB. If no message is specified, a default message - // will be used. + // is used. CommitMessage *string `locationName:"commitMessage" type:"string"` // The email address for the commit that deletes the file. If no email address - // is specified, the email address will be left blank. + // is specified, the email address is left blank. Email *string `locationName:"email" type:"string"` - // The fully-qualified path to the file that will be deleted, including the - // full name and extension of that file. For example, /examples/file.md is a - // fully qualified path to a file named file.md in a folder named examples. + // The fully qualified path to the file that to be deleted, including the full + // name and extension of that file. For example, /examples/file.md is a fully + // qualified path to a file named file.md in a folder named examples. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` - // Specifies whether to delete the folder or directory that contains the file - // you want to delete if that file is the only object in the folder or directory. - // By default, empty folders will be deleted. 
This includes empty folders that - // are part of the directory structure. For example, if the path to a file is - // dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file - // in dir4 will also delete the empty folders dir4, dir3, and dir2. + // If a file is the only object in the folder or directory, specifies whether + // to delete the folder or directory that contains the file. By default, empty + // folders are deleted. This includes empty folders that are part of the directory + // structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and + // dir2 and dir3 are empty, deleting the last file in dir4 also deletes the + // empty folders dir4, dir3, and dir2. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The name of the author of the commit that deletes the file. If no name is - // specified, the user's ARN will be used as the author name and committer name. + // specified, the user's ARN is used as the author name and committer name. Name *string `locationName:"name" type:"string"` // The ID of the commit that is the tip of the branch where you want to create - // the commit that will delete the file. This must be the HEAD commit for the - // branch. The commit that deletes the file will be created from this commit - // ID. + // the commit that deletes the file. This must be the HEAD commit for the branch. + // The commit that deletes the file is created from this commit ID. // // ParentCommitId is a required field ParentCommitId *string `locationName:"parentCommitId" type:"string" required:"true"` @@ -110,8 +109,8 @@ type DeleteFileOutput struct { // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` - // The fully-qualified path to the file that will be deleted, including the - // full name and extension of that file. + // The fully qualified path to the file to be deleted, including the full name + // and extension of that file. 
// // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` @@ -134,8 +133,8 @@ const opDeleteFile = "DeleteFile" // AWS CodeCommit. // // Deletes a specified file from a specified branch. A commit is created on -// the branch that contains the revision. The file will still exist in the commits -// prior to the commit that contains the deletion. +// the branch that contains the revision. The file still exists in the commits +// earlier to the commit that contains the deletion. // // // Example sending a request using DeleteFileRequest. // req := client.DeleteFileRequest(params) diff --git a/service/codecommit/api_op_DeletePullRequestApprovalRule.go b/service/codecommit/api_op_DeletePullRequestApprovalRule.go new file mode 100644 index 00000000000..78cb776328b --- /dev/null +++ b/service/codecommit/api_op_DeletePullRequestApprovalRule.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeletePullRequestApprovalRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule you want to delete. + // + // ApprovalRuleName is a required field + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string" required:"true"` + + // The system-generated ID of the pull request that contains the approval rule + // you want to delete. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePullRequestApprovalRuleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletePullRequestApprovalRuleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeletePullRequestApprovalRuleInput"} + + if s.ApprovalRuleName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleName")) + } + if s.ApprovalRuleName != nil && len(*s.ApprovalRuleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleName", 1)) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePullRequestApprovalRuleOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted approval rule. + // + // If the approval rule was deleted in an earlier API call, the response is + // 200 OK without content. + // + // ApprovalRuleId is a required field + ApprovalRuleId *string `locationName:"approvalRuleId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePullRequestApprovalRuleOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeletePullRequestApprovalRule = "DeletePullRequestApprovalRule" + +// DeletePullRequestApprovalRuleRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Deletes an approval rule from a specified pull request. Approval rules can +// be deleted from a pull request only if the pull request is open, and if the +// approval rule was created specifically for a pull request and not generated +// from an approval rule template associated with the repository where the pull +// request was created. You cannot delete an approval rule from a merged or +// closed pull request. +// +// // Example sending a request using DeletePullRequestApprovalRuleRequest. 
+// req := client.DeletePullRequestApprovalRuleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeletePullRequestApprovalRule +func (c *Client) DeletePullRequestApprovalRuleRequest(input *DeletePullRequestApprovalRuleInput) DeletePullRequestApprovalRuleRequest { + op := &aws.Operation{ + Name: opDeletePullRequestApprovalRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePullRequestApprovalRuleInput{} + } + + req := c.newRequest(op, input, &DeletePullRequestApprovalRuleOutput{}) + return DeletePullRequestApprovalRuleRequest{Request: req, Input: input, Copy: c.DeletePullRequestApprovalRuleRequest} +} + +// DeletePullRequestApprovalRuleRequest is the request type for the +// DeletePullRequestApprovalRule API operation. +type DeletePullRequestApprovalRuleRequest struct { + *aws.Request + Input *DeletePullRequestApprovalRuleInput + Copy func(*DeletePullRequestApprovalRuleInput) DeletePullRequestApprovalRuleRequest +} + +// Send marshals and sends the DeletePullRequestApprovalRule API request. +func (r DeletePullRequestApprovalRuleRequest) Send(ctx context.Context) (*DeletePullRequestApprovalRuleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeletePullRequestApprovalRuleResponse{ + DeletePullRequestApprovalRuleOutput: r.Request.Data.(*DeletePullRequestApprovalRuleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeletePullRequestApprovalRuleResponse is the response type for the +// DeletePullRequestApprovalRule API operation. +type DeletePullRequestApprovalRuleResponse struct { + *DeletePullRequestApprovalRuleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeletePullRequestApprovalRule request. 
+func (r *DeletePullRequestApprovalRuleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_DeleteRepository.go b/service/codecommit/api_op_DeleteRepository.go index c888f0eb147..ae95e8297ee 100644 --- a/service/codecommit/api_op_DeleteRepository.go +++ b/service/codecommit/api_op_DeleteRepository.go @@ -60,11 +60,11 @@ const opDeleteRepository = "DeleteRepository" // AWS CodeCommit. // // Deletes a repository. If a specified repository was already deleted, a null -// repository ID will be returned. +// repository ID is returned. // // Deleting a repository also deletes all associated objects and metadata. After // a repository is deleted, all future push calls to the deleted repository -// will fail. +// fail. // // // Example sending a request using DeleteRepositoryRequest. // req := client.DeleteRepositoryRequest(params) diff --git a/service/codecommit/api_op_DescribeMergeConflicts.go b/service/codecommit/api_op_DescribeMergeConflicts.go index 21261eac651..ffde20b5e61 100644 --- a/service/codecommit/api_op_DescribeMergeConflicts.go +++ b/service/codecommit/api_op_DescribeMergeConflicts.go @@ -13,20 +13,19 @@ type DescribeMergeConflictsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. 
ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -44,7 +43,7 @@ type DescribeMergeConflictsInput struct { // MergeOption is a required field MergeOption MergeOptionTypeEnum `locationName:"mergeOption" type:"string" required:"true" enum:"true"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` @@ -55,7 +54,7 @@ type DescribeMergeConflictsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). 
// // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -143,7 +142,7 @@ const opDescribeMergeConflicts = "DescribeMergeConflicts" // Returns information about one or more merge conflicts in the attempted merge // of two commit specifiers using the squash or three-way merge strategy. If // the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, -// an exception will be thrown. +// an exception is thrown. // // // Example sending a request using DescribeMergeConflictsRequest. // req := client.DescribeMergeConflictsRequest(params) diff --git a/service/codecommit/api_op_DescribePullRequestEvents.go b/service/codecommit/api_op_DescribePullRequestEvents.go index a3f0c946cbd..41b37a20b2f 100644 --- a/service/codecommit/api_op_DescribePullRequestEvents.go +++ b/service/codecommit/api_op_DescribePullRequestEvents.go @@ -13,16 +13,16 @@ type DescribePullRequestEventsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the user whose actions resulted in the - // event. Examples include updating the pull request with additional commits - // or changing the status of a pull request. + // event. Examples include updating the pull request with more commits or changing + // the status of a pull request. ActorArn *string `locationName:"actorArn" type:"string"` - // A non-negative integer used to limit the number of returned results. The - // default is 100 events, which is also the maximum number of events that can - // be returned in a result. + // A non-zero, non-negative integer used to limit the number of returned results. + // The default is 100 events, which is also the maximum number of events that + // can be returned in a result. 
MaxResults *int64 `locationName:"maxResults" type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` diff --git a/service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go b/service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go new file mode 100644 index 00000000000..c6b92516b48 --- /dev/null +++ b/service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type DisassociateApprovalRuleTemplateFromRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template to disassociate from a specified repository. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The name of the repository you want to disassociate from the template. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateApprovalRuleTemplateFromRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateApprovalRuleTemplateFromRepositoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DisassociateApprovalRuleTemplateFromRepositoryInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryName == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisassociateApprovalRuleTemplateFromRepositoryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateApprovalRuleTemplateFromRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opDisassociateApprovalRuleTemplateFromRepository = "DisassociateApprovalRuleTemplateFromRepository" + +// DisassociateApprovalRuleTemplateFromRepositoryRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Removes the association between a template and a repository so that approval +// rules based on the template are not automatically created when pull requests +// are created in the specified repository. This does not delete any approval +// rules previously created for pull requests through the template association. +// +// // Example sending a request using DisassociateApprovalRuleTemplateFromRepositoryRequest. 
+// req := client.DisassociateApprovalRuleTemplateFromRepositoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DisassociateApprovalRuleTemplateFromRepository +func (c *Client) DisassociateApprovalRuleTemplateFromRepositoryRequest(input *DisassociateApprovalRuleTemplateFromRepositoryInput) DisassociateApprovalRuleTemplateFromRepositoryRequest { + op := &aws.Operation{ + Name: opDisassociateApprovalRuleTemplateFromRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateApprovalRuleTemplateFromRepositoryInput{} + } + + req := c.newRequest(op, input, &DisassociateApprovalRuleTemplateFromRepositoryOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DisassociateApprovalRuleTemplateFromRepositoryRequest{Request: req, Input: input, Copy: c.DisassociateApprovalRuleTemplateFromRepositoryRequest} +} + +// DisassociateApprovalRuleTemplateFromRepositoryRequest is the request type for the +// DisassociateApprovalRuleTemplateFromRepository API operation. +type DisassociateApprovalRuleTemplateFromRepositoryRequest struct { + *aws.Request + Input *DisassociateApprovalRuleTemplateFromRepositoryInput + Copy func(*DisassociateApprovalRuleTemplateFromRepositoryInput) DisassociateApprovalRuleTemplateFromRepositoryRequest +} + +// Send marshals and sends the DisassociateApprovalRuleTemplateFromRepository API request. 
+func (r DisassociateApprovalRuleTemplateFromRepositoryRequest) Send(ctx context.Context) (*DisassociateApprovalRuleTemplateFromRepositoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DisassociateApprovalRuleTemplateFromRepositoryResponse{ + DisassociateApprovalRuleTemplateFromRepositoryOutput: r.Request.Data.(*DisassociateApprovalRuleTemplateFromRepositoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DisassociateApprovalRuleTemplateFromRepositoryResponse is the response type for the +// DisassociateApprovalRuleTemplateFromRepository API operation. +type DisassociateApprovalRuleTemplateFromRepositoryResponse struct { + *DisassociateApprovalRuleTemplateFromRepositoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DisassociateApprovalRuleTemplateFromRepository request. +func (r *DisassociateApprovalRuleTemplateFromRepositoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_EvaluatePullRequestApprovalRules.go b/service/codecommit/api_op_EvaluatePullRequestApprovalRules.go new file mode 100644 index 00000000000..dcb4008ae03 --- /dev/null +++ b/service/codecommit/api_op_EvaluatePullRequestApprovalRules.go @@ -0,0 +1,134 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type EvaluatePullRequestApprovalRulesInput struct { + _ struct{} `type:"structure"` + + // The system-generated ID of the pull request you want to evaluate. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID for the pull request revision. 
To retrieve the most + // recent revision ID for a pull request, use GetPullRequest. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EvaluatePullRequestApprovalRulesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EvaluatePullRequestApprovalRulesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "EvaluatePullRequestApprovalRulesInput"} + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EvaluatePullRequestApprovalRulesOutput struct { + _ struct{} `type:"structure"` + + // The result of the evaluation, including the names of the rules whose conditions + // have been met (if any), the names of the rules whose conditions have not + // been met (if any), whether the pull request is in the approved state, and + // whether the pull request approval rule has been set aside by an override. + // + // Evaluation is a required field + Evaluation *Evaluation `locationName:"evaluation" type:"structure" required:"true"` +} + +// String returns the string representation +func (s EvaluatePullRequestApprovalRulesOutput) String() string { + return awsutil.Prettify(s) +} + +const opEvaluatePullRequestApprovalRules = "EvaluatePullRequestApprovalRules" + +// EvaluatePullRequestApprovalRulesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Evaluates whether a pull request has met all the conditions specified in +// its associated approval rules. +// +// // Example sending a request using EvaluatePullRequestApprovalRulesRequest. 
+// req := client.EvaluatePullRequestApprovalRulesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/EvaluatePullRequestApprovalRules +func (c *Client) EvaluatePullRequestApprovalRulesRequest(input *EvaluatePullRequestApprovalRulesInput) EvaluatePullRequestApprovalRulesRequest { + op := &aws.Operation{ + Name: opEvaluatePullRequestApprovalRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EvaluatePullRequestApprovalRulesInput{} + } + + req := c.newRequest(op, input, &EvaluatePullRequestApprovalRulesOutput{}) + return EvaluatePullRequestApprovalRulesRequest{Request: req, Input: input, Copy: c.EvaluatePullRequestApprovalRulesRequest} +} + +// EvaluatePullRequestApprovalRulesRequest is the request type for the +// EvaluatePullRequestApprovalRules API operation. +type EvaluatePullRequestApprovalRulesRequest struct { + *aws.Request + Input *EvaluatePullRequestApprovalRulesInput + Copy func(*EvaluatePullRequestApprovalRulesInput) EvaluatePullRequestApprovalRulesRequest +} + +// Send marshals and sends the EvaluatePullRequestApprovalRules API request. +func (r EvaluatePullRequestApprovalRulesRequest) Send(ctx context.Context) (*EvaluatePullRequestApprovalRulesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &EvaluatePullRequestApprovalRulesResponse{ + EvaluatePullRequestApprovalRulesOutput: r.Request.Data.(*EvaluatePullRequestApprovalRulesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// EvaluatePullRequestApprovalRulesResponse is the response type for the +// EvaluatePullRequestApprovalRules API operation. 
+type EvaluatePullRequestApprovalRulesResponse struct { + *EvaluatePullRequestApprovalRulesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// EvaluatePullRequestApprovalRules request. +func (r *EvaluatePullRequestApprovalRulesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetApprovalRuleTemplate.go b/service/codecommit/api_op_GetApprovalRuleTemplate.go new file mode 100644 index 00000000000..26457822202 --- /dev/null +++ b/service/codecommit/api_op_GetApprovalRuleTemplate.go @@ -0,0 +1,123 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template for which you want to get information. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // The content and structure of the approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetApprovalRuleTemplate = "GetApprovalRuleTemplate" + +// GetApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Returns information about a specified approval rule template. +// +// // Example sending a request using GetApprovalRuleTemplateRequest. 
+// req := client.GetApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetApprovalRuleTemplate +func (c *Client) GetApprovalRuleTemplateRequest(input *GetApprovalRuleTemplateInput) GetApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opGetApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &GetApprovalRuleTemplateOutput{}) + return GetApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.GetApprovalRuleTemplateRequest} +} + +// GetApprovalRuleTemplateRequest is the request type for the +// GetApprovalRuleTemplate API operation. +type GetApprovalRuleTemplateRequest struct { + *aws.Request + Input *GetApprovalRuleTemplateInput + Copy func(*GetApprovalRuleTemplateInput) GetApprovalRuleTemplateRequest +} + +// Send marshals and sends the GetApprovalRuleTemplate API request. +func (r GetApprovalRuleTemplateRequest) Send(ctx context.Context) (*GetApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetApprovalRuleTemplateResponse{ + GetApprovalRuleTemplateOutput: r.Request.Data.(*GetApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetApprovalRuleTemplateResponse is the response type for the +// GetApprovalRuleTemplate API operation. +type GetApprovalRuleTemplateResponse struct { + *GetApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetApprovalRuleTemplate request. 
+func (r *GetApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetBlob.go b/service/codecommit/api_op_GetBlob.go index 44c45b181f1..d2501fc0b8f 100644 --- a/service/codecommit/api_op_GetBlob.go +++ b/service/codecommit/api_op_GetBlob.go @@ -72,7 +72,7 @@ const opGetBlob = "GetBlob" // GetBlobRequest returns a request value for making API operation for // AWS CodeCommit. // -// Returns the base-64 encoded content of an individual blob within a repository. +// Returns the base-64 encoded content of an individual blob in a repository. // // // Example sending a request using GetBlobRequest. // req := client.GetBlobRequest(params) diff --git a/service/codecommit/api_op_GetCommentsForComparedCommit.go b/service/codecommit/api_op_GetCommentsForComparedCommit.go index 922f8c9042c..6c7492d9d42 100644 --- a/service/codecommit/api_op_GetCommentsForComparedCommit.go +++ b/service/codecommit/api_op_GetCommentsForComparedCommit.go @@ -13,17 +13,17 @@ type GetCommentsForComparedCommitInput struct { _ struct{} `type:"structure"` // To establish the directionality of the comparison, the full commit ID of - // the 'after' commit. + // the after commit. // // AfterCommitId is a required field AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` // To establish the directionality of the comparison, the full commit ID of - // the 'before' commit. + // the before commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // A non-negative integer used to limit the number of returned results. The - // default is 100 comments, and is configurable up to 500. + // A non-zero, non-negative integer used to limit the number of returned results. + // The default is 100 comments, but you can configure up to 500. 
MaxResults *int64 `locationName:"maxResults" type:"integer"` // An enumeration token that when provided in a request, returns the next batch diff --git a/service/codecommit/api_op_GetCommentsForPullRequest.go b/service/codecommit/api_op_GetCommentsForPullRequest.go index 7bce7bcc34d..d604ed8a15a 100644 --- a/service/codecommit/api_op_GetCommentsForPullRequest.go +++ b/service/codecommit/api_op_GetCommentsForPullRequest.go @@ -20,12 +20,12 @@ type GetCommentsForPullRequestInput struct { // of the branch at the time the pull request was created. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // A non-negative integer used to limit the number of returned results. The - // default is 100 comments. You can return up to 500 comments with a single + // A non-zero, non-negative integer used to limit the number of returned results. + // The default is 100 comments. You can return up to 500 comments with a single // request. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` diff --git a/service/codecommit/api_op_GetCommit.go b/service/codecommit/api_op_GetCommit.go index 931cf68397f..683cd321174 100644 --- a/service/codecommit/api_op_GetCommit.go +++ b/service/codecommit/api_op_GetCommit.go @@ -13,7 +13,7 @@ import ( type GetCommitInput struct { _ struct{} `type:"structure"` - // The commit ID. Commit IDs are the full SHA of the commit. + // The commit ID. Commit IDs are the full SHA ID of the commit. 
// // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` diff --git a/service/codecommit/api_op_GetDifferences.go b/service/codecommit/api_op_GetDifferences.go index b75951d0ed1..a97b659c4c4 100644 --- a/service/codecommit/api_op_GetDifferences.go +++ b/service/codecommit/api_op_GetDifferences.go @@ -20,26 +20,26 @@ type GetDifferencesInput struct { // The file path in which to check differences. Limits the results to this path. // Can also be used to specify the changed name of a directory or folder, if - // it has changed. If not specified, differences will be shown for all paths. + // it has changed. If not specified, differences are shown for all paths. AfterPath *string `locationName:"afterPath" type:"string"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, the full commit ID. Optional. If not specified, all - // changes prior to the afterCommitSpecifier value will be shown. If you do - // not use beforeCommitSpecifier in your request, consider limiting the results - // with maxResults. + // a commit (for example, the full commit ID). Optional. If not specified, all + // changes before the afterCommitSpecifier value are shown. If you do not use + // beforeCommitSpecifier in your request, consider limiting the results with + // maxResults. BeforeCommitSpecifier *string `locationName:"beforeCommitSpecifier" type:"string"` // The file path in which to check for differences. Limits the results to this // path. Can also be used to specify the previous name of a directory or folder. - // If beforePath and afterPath are not specified, differences will be shown - // for all paths. + // If beforePath and afterPath are not specified, differences are shown for + // all paths. BeforePath *string `locationName:"beforePath" type:"string"` - // A non-negative integer used to limit the number of returned results. 
+ // A non-zero, non-negative integer used to limit the number of returned results. MaxResults *int64 `type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `type:"string"` @@ -78,8 +78,8 @@ func (s *GetDifferencesInput) Validate() error { type GetDifferencesOutput struct { _ struct{} `type:"structure"` - // A differences data type object that contains information about the differences, - // including whether the difference is added, modified, or deleted (A, D, M). + // A data type object that contains information about the differences, including + // whether the difference is added, modified, or deleted (A, D, M). Differences []Difference `locationName:"differences" type:"list"` // An enumeration token that can be used in a request to return the next batch @@ -98,7 +98,7 @@ const opGetDifferences = "GetDifferences" // AWS CodeCommit. // // Returns information about the differences in a valid commit specifier (such -// as a branch, tag, HEAD, commit ID or other fully qualified reference). Results +// as a branch, tag, HEAD, commit ID, or other fully qualified reference). Results // can be limited to a specified path. // // // Example sending a request using GetDifferencesRequest. diff --git a/service/codecommit/api_op_GetFile.go b/service/codecommit/api_op_GetFile.go index 712d4fb715b..c2c3c92d78a 100644 --- a/service/codecommit/api_op_GetFile.go +++ b/service/codecommit/api_op_GetFile.go @@ -12,14 +12,14 @@ import ( type GetFileInput struct { _ struct{} `type:"structure"` - // The fully-quaified reference that identifies the commit that contains the - // file. For example, you could specify a full commit ID, a tag, a branch name, - // or a reference such as refs/heads/master. If none is provided, then the head - // commit will be used. 
+	// The fully qualified reference that identifies the commit that contains the
+	// file. For example, you can specify a full commit ID, a tag, a branch name,
+	// or a reference such as refs/heads/master. If none is provided, the head commit
+	// is used.
 	CommitSpecifier *string `locationName:"commitSpecifier" type:"string"`
 
-	// The fully-qualified path to the file, including the full name and extension
-	// of the file. For example, /examples/file.md is the fully-qualified path to
+	// The fully qualified path to the file, including the full name and extension
+	// of the file. For example, /examples/file.md is the fully qualified path to
 	// a file named file.md in a folder named examples.
 	//
 	// FilePath is a required field
 	FilePath *string `locationName:"filePath" type:"string" required:"true"`
@@ -83,13 +83,13 @@ type GetFileOutput struct {
 	//
 	// The file mode permissions returned by this API are not the standard file
 	// mode permission values, such as 100644, but rather extrapolated values. See
-	// below for a full list of supported return values.
+	// the supported return values.
 	//
 	// FileMode is a required field
 	FileMode FileModeTypeEnum `locationName:"fileMode" type:"string" required:"true" enum:"true"`
 
-	// The fully qualified path to the specified file. This returns the name and
-	// extension of the file.
+	// The fully qualified path to the specified file. Returns the name and extension
+	// of the file.
 	//
 	// FilePath is a required field
 	FilePath *string `locationName:"filePath" type:"string" required:"true"`
diff --git a/service/codecommit/api_op_GetFolder.go b/service/codecommit/api_op_GetFolder.go
index e8fd0cc9629..12f0cb84690 100644
--- a/service/codecommit/api_op_GetFolder.go
+++ b/service/codecommit/api_op_GetFolder.go
@@ -12,13 +12,13 @@ type GetFolderInput struct {
 	_ struct{} `type:"structure"`
 
-	// A fully-qualified reference used to identify a commit that contains the version
-	// of the folder's content to return.
A fully-qualified reference can be a commit + // A fully qualified reference used to identify a commit that contains the version + // of the folder's content to return. A fully qualified reference can be a commit // ID, branch name, tag, or reference such as HEAD. If no specifier is provided, - // the folder content will be returned as it exists in the HEAD commit. + // the folder content is returned as it exists in the HEAD commit. CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` - // The fully-qualified path to the folder whose contents will be returned, including + // The fully qualified path to the folder whose contents are returned, including // the folder name. For example, /examples is a fully-qualified path to a folder // named examples that was created off of the root directory (/) of a repository. // @@ -60,28 +60,28 @@ func (s *GetFolderInput) Validate() error { type GetFolderOutput struct { _ struct{} `type:"structure"` - // The full commit ID used as a reference for which version of the folder content - // is returned. + // The full commit ID used as a reference for the returned version of the folder + // content. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` - // The list of files that exist in the specified folder, if any. + // The list of files in the specified folder, if any. Files []File `locationName:"files" type:"list"` - // The fully-qualified path of the folder whose contents are returned. + // The fully qualified path of the folder whose contents are returned. // // FolderPath is a required field FolderPath *string `locationName:"folderPath" type:"string" required:"true"` - // The list of folders that exist beneath the specified folder, if any. + // The list of folders that exist under the specified folder, if any. SubFolders []Folder `locationName:"subFolders" type:"list"` - // The list of submodules that exist in the specified folder, if any. 
+ // The list of submodules in the specified folder, if any. SubModules []SubModule `locationName:"subModules" type:"list"` - // The list of symbolic links to other files and folders that exist in the specified - // folder, if any. + // The list of symbolic links to other files and folders in the specified folder, + // if any. SymbolicLinks []SymbolicLink `locationName:"symbolicLinks" type:"list"` // The full SHA-1 pointer of the tree information for the commit that contains diff --git a/service/codecommit/api_op_GetMergeCommit.go b/service/codecommit/api_op_GetMergeCommit.go index ff1847d0c84..19cf6564a4f 100644 --- a/service/codecommit/api_op_GetMergeCommit.go +++ b/service/codecommit/api_op_GetMergeCommit.go @@ -13,20 +13,19 @@ type GetMergeCommitInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. 
ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -38,7 +37,7 @@ type GetMergeCommitInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -86,7 +85,7 @@ type GetMergeCommitOutput struct { // The commit ID for the merge commit created when the source branch was merged // into the destination branch. If the fast-forward merge strategy was used, - // no merge commit exists. + // there is no merge commit. MergedCommitId *string `locationName:"mergedCommitId" type:"string"` // The commit ID of the source commit specifier that was used in the merge evaluation. diff --git a/service/codecommit/api_op_GetMergeConflicts.go b/service/codecommit/api_op_GetMergeConflicts.go index 71cb774eed6..557688c3738 100644 --- a/service/codecommit/api_op_GetMergeConflicts.go +++ b/service/codecommit/api_op_GetMergeConflicts.go @@ -13,20 +13,19 @@ type GetMergeConflictsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. 
If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -39,7 +38,7 @@ type GetMergeConflictsInput struct { // MergeOption is a required field MergeOption MergeOptionTypeEnum `locationName:"mergeOption" type:"string" required:"true" enum:"true"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` @@ -49,7 +48,7 @@ type GetMergeConflictsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. 
For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -95,7 +94,7 @@ type GetMergeConflictsOutput struct { BaseCommitId *string `locationName:"baseCommitId" type:"string"` // A list of metadata for any conflicting files. If the specified merge strategy - // is FAST_FORWARD_MERGE, this list will always be empty. + // is FAST_FORWARD_MERGE, this list is always empty. // // ConflictMetadataList is a required field ConflictMetadataList []ConflictMetadata `locationName:"conflictMetadataList" type:"list" required:"true"` diff --git a/service/codecommit/api_op_GetMergeOptions.go b/service/codecommit/api_op_GetMergeOptions.go index 5852fbb6a7e..e0156080d4e 100644 --- a/service/codecommit/api_op_GetMergeOptions.go +++ b/service/codecommit/api_op_GetMergeOptions.go @@ -13,20 +13,19 @@ type GetMergeOptionsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. 
The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -38,7 +37,7 @@ type GetMergeOptionsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -110,8 +109,8 @@ const opGetMergeOptions = "GetMergeOptions" // AWS CodeCommit. // // Returns information about the merge options available for merging two specified -// branches. For details about why a particular merge option is not available, -// use GetMergeConflicts or DescribeMergeConflicts. +// branches. For details about why a merge option is not available, use GetMergeConflicts +// or DescribeMergeConflicts. // // // Example sending a request using GetMergeOptionsRequest. 
// req := client.GetMergeOptionsRequest(params) diff --git a/service/codecommit/api_op_GetPullRequestApprovalStates.go b/service/codecommit/api_op_GetPullRequestApprovalStates.go new file mode 100644 index 00000000000..3eb27c1898d --- /dev/null +++ b/service/codecommit/api_op_GetPullRequestApprovalStates.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetPullRequestApprovalStatesInput struct { + _ struct{} `type:"structure"` + + // The system-generated ID for the pull request. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID for the pull request revision. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPullRequestApprovalStatesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPullRequestApprovalStatesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPullRequestApprovalStatesInput"} + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetPullRequestApprovalStatesOutput struct { + _ struct{} `type:"structure"` + + // Information about users who have approved the pull request. 
+ Approvals []Approval `locationName:"approvals" type:"list"` +} + +// String returns the string representation +func (s GetPullRequestApprovalStatesOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetPullRequestApprovalStates = "GetPullRequestApprovalStates" + +// GetPullRequestApprovalStatesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Gets information about the approval states for a specified pull request. +// Approval states only apply to pull requests that have one or more approval +// rules applied to them. +// +// // Example sending a request using GetPullRequestApprovalStatesRequest. +// req := client.GetPullRequestApprovalStatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequestApprovalStates +func (c *Client) GetPullRequestApprovalStatesRequest(input *GetPullRequestApprovalStatesInput) GetPullRequestApprovalStatesRequest { + op := &aws.Operation{ + Name: opGetPullRequestApprovalStates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPullRequestApprovalStatesInput{} + } + + req := c.newRequest(op, input, &GetPullRequestApprovalStatesOutput{}) + return GetPullRequestApprovalStatesRequest{Request: req, Input: input, Copy: c.GetPullRequestApprovalStatesRequest} +} + +// GetPullRequestApprovalStatesRequest is the request type for the +// GetPullRequestApprovalStates API operation. +type GetPullRequestApprovalStatesRequest struct { + *aws.Request + Input *GetPullRequestApprovalStatesInput + Copy func(*GetPullRequestApprovalStatesInput) GetPullRequestApprovalStatesRequest +} + +// Send marshals and sends the GetPullRequestApprovalStates API request. 
+func (r GetPullRequestApprovalStatesRequest) Send(ctx context.Context) (*GetPullRequestApprovalStatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPullRequestApprovalStatesResponse{ + GetPullRequestApprovalStatesOutput: r.Request.Data.(*GetPullRequestApprovalStatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPullRequestApprovalStatesResponse is the response type for the +// GetPullRequestApprovalStates API operation. +type GetPullRequestApprovalStatesResponse struct { + *GetPullRequestApprovalStatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPullRequestApprovalStates request. +func (r *GetPullRequestApprovalStatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetPullRequestOverrideState.go b/service/codecommit/api_op_GetPullRequestOverrideState.go new file mode 100644 index 00000000000..08b73aa4015 --- /dev/null +++ b/service/codecommit/api_op_GetPullRequestOverrideState.go @@ -0,0 +1,136 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetPullRequestOverrideStateInput struct { + _ struct{} `type:"structure"` + + // The ID of the pull request for which you want to get information about whether + // approval rules have been set aside (overridden). + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID of the revision for the pull request. To retrieve + // the most recent revision ID, use GetPullRequest. 
+ // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPullRequestOverrideStateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPullRequestOverrideStateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPullRequestOverrideStateInput"} + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetPullRequestOverrideStateOutput struct { + _ struct{} `type:"structure"` + + // A Boolean value that indicates whether a pull request has had its rules set + // aside (TRUE) or whether all approval rules still apply (FALSE). + Overridden *bool `locationName:"overridden" type:"boolean"` + + // The Amazon Resource Name (ARN) of the user or identity that overrode the + // rules and their requirements for the pull request. + Overrider *string `locationName:"overrider" type:"string"` +} + +// String returns the string representation +func (s GetPullRequestOverrideStateOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetPullRequestOverrideState = "GetPullRequestOverrideState" + +// GetPullRequestOverrideStateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Returns information about whether approval rules have been set aside (overridden) +// for a pull request, and if so, the Amazon Resource Name (ARN) of the user +// or identity that overrode the rules and their requirements for the pull request. +// +// // Example sending a request using GetPullRequestOverrideStateRequest. 
+// req := client.GetPullRequestOverrideStateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequestOverrideState +func (c *Client) GetPullRequestOverrideStateRequest(input *GetPullRequestOverrideStateInput) GetPullRequestOverrideStateRequest { + op := &aws.Operation{ + Name: opGetPullRequestOverrideState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPullRequestOverrideStateInput{} + } + + req := c.newRequest(op, input, &GetPullRequestOverrideStateOutput{}) + return GetPullRequestOverrideStateRequest{Request: req, Input: input, Copy: c.GetPullRequestOverrideStateRequest} +} + +// GetPullRequestOverrideStateRequest is the request type for the +// GetPullRequestOverrideState API operation. +type GetPullRequestOverrideStateRequest struct { + *aws.Request + Input *GetPullRequestOverrideStateInput + Copy func(*GetPullRequestOverrideStateInput) GetPullRequestOverrideStateRequest +} + +// Send marshals and sends the GetPullRequestOverrideState API request. +func (r GetPullRequestOverrideStateRequest) Send(ctx context.Context) (*GetPullRequestOverrideStateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPullRequestOverrideStateResponse{ + GetPullRequestOverrideStateOutput: r.Request.Data.(*GetPullRequestOverrideStateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPullRequestOverrideStateResponse is the response type for the +// GetPullRequestOverrideState API operation. +type GetPullRequestOverrideStateResponse struct { + *GetPullRequestOverrideStateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPullRequestOverrideState request. 
+func (r *GetPullRequestOverrideStateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetRepository.go b/service/codecommit/api_op_GetRepository.go index 9f13e0052c1..adcd6fb69e7 100644 --- a/service/codecommit/api_op_GetRepository.go +++ b/service/codecommit/api_op_GetRepository.go @@ -63,9 +63,9 @@ const opGetRepository = "GetRepository" // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// and display it in a webpage can expose users to potentially malicious code. +// Make sure that you HTML-encode the description field in any application that +// uses this API to display the repository description on a webpage. // // // Example sending a request using GetRepositoryRequest. // req := client.GetRepositoryRequest(params) diff --git a/service/codecommit/api_op_ListApprovalRuleTemplates.go b/service/codecommit/api_op_ListApprovalRuleTemplates.go new file mode 100644 index 00000000000..350b8cc57ee --- /dev/null +++ b/service/codecommit/api_op_ListApprovalRuleTemplates.go @@ -0,0 +1,166 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListApprovalRuleTemplatesInput struct { + _ struct{} `type:"structure"` + + // A non-zero, non-negative integer used to limit the number of returned results. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApprovalRuleTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +type ListApprovalRuleTemplatesOutput struct { + _ struct{} `type:"structure"` + + // The names of all the approval rule templates found in the AWS Region for + // your AWS account. + ApprovalRuleTemplateNames []string `locationName:"approvalRuleTemplateNames" type:"list"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApprovalRuleTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListApprovalRuleTemplates = "ListApprovalRuleTemplates" + +// ListApprovalRuleTemplatesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Lists all approval rule templates in the specified AWS Region in your AWS +// account. If an AWS Region is not specified, the AWS Region where you are +// signed in is used. +// +// // Example sending a request using ListApprovalRuleTemplatesRequest. 
+// req := client.ListApprovalRuleTemplatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListApprovalRuleTemplates +func (c *Client) ListApprovalRuleTemplatesRequest(input *ListApprovalRuleTemplatesInput) ListApprovalRuleTemplatesRequest { + op := &aws.Operation{ + Name: opListApprovalRuleTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApprovalRuleTemplatesInput{} + } + + req := c.newRequest(op, input, &ListApprovalRuleTemplatesOutput{}) + return ListApprovalRuleTemplatesRequest{Request: req, Input: input, Copy: c.ListApprovalRuleTemplatesRequest} +} + +// ListApprovalRuleTemplatesRequest is the request type for the +// ListApprovalRuleTemplates API operation. +type ListApprovalRuleTemplatesRequest struct { + *aws.Request + Input *ListApprovalRuleTemplatesInput + Copy func(*ListApprovalRuleTemplatesInput) ListApprovalRuleTemplatesRequest +} + +// Send marshals and sends the ListApprovalRuleTemplates API request. +func (r ListApprovalRuleTemplatesRequest) Send(ctx context.Context) (*ListApprovalRuleTemplatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListApprovalRuleTemplatesResponse{ + ListApprovalRuleTemplatesOutput: r.Request.Data.(*ListApprovalRuleTemplatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListApprovalRuleTemplatesRequestPaginator returns a paginator for ListApprovalRuleTemplates. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListApprovalRuleTemplatesRequest(input) +// p := codecommit.NewListApprovalRuleTemplatesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListApprovalRuleTemplatesPaginator(req ListApprovalRuleTemplatesRequest) ListApprovalRuleTemplatesPaginator { + return ListApprovalRuleTemplatesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListApprovalRuleTemplatesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListApprovalRuleTemplatesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListApprovalRuleTemplatesPaginator struct { + aws.Pager +} + +func (p *ListApprovalRuleTemplatesPaginator) CurrentPage() *ListApprovalRuleTemplatesOutput { + return p.Pager.CurrentPage().(*ListApprovalRuleTemplatesOutput) +} + +// ListApprovalRuleTemplatesResponse is the response type for the +// ListApprovalRuleTemplates API operation. +type ListApprovalRuleTemplatesResponse struct { + *ListApprovalRuleTemplatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListApprovalRuleTemplates request. 
+func (r *ListApprovalRuleTemplatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go b/service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go new file mode 100644 index 00000000000..efa81245b9d --- /dev/null +++ b/service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go @@ -0,0 +1,186 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListAssociatedApprovalRuleTemplatesForRepositoryInput struct { + _ struct{} `type:"structure"` + + // A non-zero, non-negative integer used to limit the number of returned results. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository for which you want to list all associated approval + // rule templates. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAssociatedApprovalRuleTemplatesForRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAssociatedApprovalRuleTemplatesForRepositoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAssociatedApprovalRuleTemplatesForRepositoryInput"} + + if s.RepositoryName == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssociatedApprovalRuleTemplatesForRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The names of all approval rule templates associated with the repository. + ApprovalRuleTemplateNames []string `locationName:"approvalRuleTemplateNames" type:"list"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAssociatedApprovalRuleTemplatesForRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAssociatedApprovalRuleTemplatesForRepository = "ListAssociatedApprovalRuleTemplatesForRepository" + +// ListAssociatedApprovalRuleTemplatesForRepositoryRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Lists all approval rule templates that are associated with a specified repository. +// +// // Example sending a request using ListAssociatedApprovalRuleTemplatesForRepositoryRequest. 
+// req := client.ListAssociatedApprovalRuleTemplatesForRepositoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListAssociatedApprovalRuleTemplatesForRepository +func (c *Client) ListAssociatedApprovalRuleTemplatesForRepositoryRequest(input *ListAssociatedApprovalRuleTemplatesForRepositoryInput) ListAssociatedApprovalRuleTemplatesForRepositoryRequest { + op := &aws.Operation{ + Name: opListAssociatedApprovalRuleTemplatesForRepository, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAssociatedApprovalRuleTemplatesForRepositoryInput{} + } + + req := c.newRequest(op, input, &ListAssociatedApprovalRuleTemplatesForRepositoryOutput{}) + return ListAssociatedApprovalRuleTemplatesForRepositoryRequest{Request: req, Input: input, Copy: c.ListAssociatedApprovalRuleTemplatesForRepositoryRequest} +} + +// ListAssociatedApprovalRuleTemplatesForRepositoryRequest is the request type for the +// ListAssociatedApprovalRuleTemplatesForRepository API operation. +type ListAssociatedApprovalRuleTemplatesForRepositoryRequest struct { + *aws.Request + Input *ListAssociatedApprovalRuleTemplatesForRepositoryInput + Copy func(*ListAssociatedApprovalRuleTemplatesForRepositoryInput) ListAssociatedApprovalRuleTemplatesForRepositoryRequest +} + +// Send marshals and sends the ListAssociatedApprovalRuleTemplatesForRepository API request. 
+func (r ListAssociatedApprovalRuleTemplatesForRepositoryRequest) Send(ctx context.Context) (*ListAssociatedApprovalRuleTemplatesForRepositoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAssociatedApprovalRuleTemplatesForRepositoryResponse{ + ListAssociatedApprovalRuleTemplatesForRepositoryOutput: r.Request.Data.(*ListAssociatedApprovalRuleTemplatesForRepositoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAssociatedApprovalRuleTemplatesForRepositoryRequestPaginator returns a paginator for ListAssociatedApprovalRuleTemplatesForRepository. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAssociatedApprovalRuleTemplatesForRepositoryRequest(input) +// p := codecommit.NewListAssociatedApprovalRuleTemplatesForRepositoryRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAssociatedApprovalRuleTemplatesForRepositoryPaginator(req ListAssociatedApprovalRuleTemplatesForRepositoryRequest) ListAssociatedApprovalRuleTemplatesForRepositoryPaginator { + return ListAssociatedApprovalRuleTemplatesForRepositoryPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAssociatedApprovalRuleTemplatesForRepositoryInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAssociatedApprovalRuleTemplatesForRepositoryPaginator is used to paginate the request. 
This can be done by +// calling Next and CurrentPage. +type ListAssociatedApprovalRuleTemplatesForRepositoryPaginator struct { + aws.Pager +} + +func (p *ListAssociatedApprovalRuleTemplatesForRepositoryPaginator) CurrentPage() *ListAssociatedApprovalRuleTemplatesForRepositoryOutput { + return p.Pager.CurrentPage().(*ListAssociatedApprovalRuleTemplatesForRepositoryOutput) +} + +// ListAssociatedApprovalRuleTemplatesForRepositoryResponse is the response type for the +// ListAssociatedApprovalRuleTemplatesForRepository API operation. +type ListAssociatedApprovalRuleTemplatesForRepositoryResponse struct { + *ListAssociatedApprovalRuleTemplatesForRepositoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAssociatedApprovalRuleTemplatesForRepository request. +func (r *ListAssociatedApprovalRuleTemplatesForRepositoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_ListPullRequests.go b/service/codecommit/api_op_ListPullRequests.go index 736985a4b1a..470e3a5cce7 100644 --- a/service/codecommit/api_op_ListPullRequests.go +++ b/service/codecommit/api_op_ListPullRequests.go @@ -17,10 +17,10 @@ type ListPullRequestsInput struct { // user. AuthorArn *string `locationName:"authorArn" type:"string"` - // A non-negative integer used to limit the number of returned results. + // A non-zero, non-negative integer used to limit the number of returned results. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. 
NextToken *string `locationName:"nextToken" type:"string"` @@ -59,8 +59,8 @@ func (s *ListPullRequestsInput) Validate() error { type ListPullRequestsOutput struct { _ struct{} `type:"structure"` - // An enumeration token that when provided in a request, returns the next batch - // of the results. + // An enumeration token that allows the operation to batch the next results + // of the operation. NextToken *string `locationName:"nextToken" type:"string"` // The system-generated IDs of the pull requests. diff --git a/service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go b/service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go new file mode 100644 index 00000000000..10a52f6aa72 --- /dev/null +++ b/service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go @@ -0,0 +1,187 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListRepositoriesForApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template for which you want to list repositories + // that are associated with that template. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // A non-zero, non-negative integer used to limit the number of returned results. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListRepositoriesForApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListRepositoriesForApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRepositoriesForApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListRepositoriesForApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of repository names that are associated with the specified approval + // rule template. + RepositoryNames []string `locationName:"repositoryNames" type:"list"` +} + +// String returns the string representation +func (s ListRepositoriesForApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opListRepositoriesForApprovalRuleTemplate = "ListRepositoriesForApprovalRuleTemplate" + +// ListRepositoriesForApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Lists all repositories associated with the specified approval rule template. +// +// // Example sending a request using ListRepositoriesForApprovalRuleTemplateRequest. 
+// req := client.ListRepositoriesForApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListRepositoriesForApprovalRuleTemplate +func (c *Client) ListRepositoriesForApprovalRuleTemplateRequest(input *ListRepositoriesForApprovalRuleTemplateInput) ListRepositoriesForApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opListRepositoriesForApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRepositoriesForApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &ListRepositoriesForApprovalRuleTemplateOutput{}) + return ListRepositoriesForApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.ListRepositoriesForApprovalRuleTemplateRequest} +} + +// ListRepositoriesForApprovalRuleTemplateRequest is the request type for the +// ListRepositoriesForApprovalRuleTemplate API operation. +type ListRepositoriesForApprovalRuleTemplateRequest struct { + *aws.Request + Input *ListRepositoriesForApprovalRuleTemplateInput + Copy func(*ListRepositoriesForApprovalRuleTemplateInput) ListRepositoriesForApprovalRuleTemplateRequest +} + +// Send marshals and sends the ListRepositoriesForApprovalRuleTemplate API request. 
+func (r ListRepositoriesForApprovalRuleTemplateRequest) Send(ctx context.Context) (*ListRepositoriesForApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRepositoriesForApprovalRuleTemplateResponse{ + ListRepositoriesForApprovalRuleTemplateOutput: r.Request.Data.(*ListRepositoriesForApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRepositoriesForApprovalRuleTemplateRequestPaginator returns a paginator for ListRepositoriesForApprovalRuleTemplate. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRepositoriesForApprovalRuleTemplateRequest(input) +// p := codecommit.NewListRepositoriesForApprovalRuleTemplateRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRepositoriesForApprovalRuleTemplatePaginator(req ListRepositoriesForApprovalRuleTemplateRequest) ListRepositoriesForApprovalRuleTemplatePaginator { + return ListRepositoriesForApprovalRuleTemplatePaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRepositoriesForApprovalRuleTemplateInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRepositoriesForApprovalRuleTemplatePaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
+type ListRepositoriesForApprovalRuleTemplatePaginator struct { + aws.Pager +} + +func (p *ListRepositoriesForApprovalRuleTemplatePaginator) CurrentPage() *ListRepositoriesForApprovalRuleTemplateOutput { + return p.Pager.CurrentPage().(*ListRepositoriesForApprovalRuleTemplateOutput) +} + +// ListRepositoriesForApprovalRuleTemplateResponse is the response type for the +// ListRepositoriesForApprovalRuleTemplate API operation. +type ListRepositoriesForApprovalRuleTemplateResponse struct { + *ListRepositoriesForApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRepositoriesForApprovalRuleTemplate request. +func (r *ListRepositoriesForApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_ListTagsForResource.go b/service/codecommit/api_op_ListTagsForResource.go index b3d4cac42ba..50aa4dc4ef6 100644 --- a/service/codecommit/api_op_ListTagsForResource.go +++ b/service/codecommit/api_op_ListTagsForResource.go @@ -12,7 +12,7 @@ import ( type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` diff --git a/service/codecommit/api_op_MergeBranchesByFastForward.go b/service/codecommit/api_op_MergeBranchesByFastForward.go index b5895e88314..af25ebfd28b 100644 --- a/service/codecommit/api_op_MergeBranchesByFastForward.go +++ b/service/codecommit/api_op_MergeBranchesByFastForward.go @@ -13,7 +13,7 @@ type MergeBranchesByFastForwardInput struct { _ struct{} `type:"structure"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). 
// // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -24,12 +24,12 @@ type MergeBranchesByFastForwardInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` - // The branch where the merge will be applied. + // The branch where the merge is applied. TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } diff --git a/service/codecommit/api_op_MergeBranchesBySquash.go b/service/codecommit/api_op_MergeBranchesBySquash.go index 2e1af769f1d..d0788a42d80 100644 --- a/service/codecommit/api_op_MergeBranchesBySquash.go +++ b/service/codecommit/api_op_MergeBranchesBySquash.go @@ -12,43 +12,42 @@ import ( type MergeBranchesBySquashInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. 
+ // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure // if the changes leave the folders empty. 
If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // file is created for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The name of the repository where you want to merge two branches. @@ -57,12 +56,12 @@ type MergeBranchesBySquashInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` - // The branch where the merge will be applied. + // The branch where the merge is applied. TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } diff --git a/service/codecommit/api_op_MergeBranchesByThreeWay.go b/service/codecommit/api_op_MergeBranchesByThreeWay.go index bf04f61d1e9..ad326fe7edf 100644 --- a/service/codecommit/api_op_MergeBranchesByThreeWay.go +++ b/service/codecommit/api_op_MergeBranchesByThreeWay.go @@ -12,43 +12,42 @@ import ( type MergeBranchesByThreeWayInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message to include in the commit information for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. 
If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. 
This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The name of the repository where you want to merge two branches. @@ -57,12 +56,12 @@ type MergeBranchesByThreeWayInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` - // The branch where the merge will be applied. + // The branch where the merge is applied. TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } diff --git a/service/codecommit/api_op_MergePullRequestByFastForward.go b/service/codecommit/api_op_MergePullRequestByFastForward.go index 5fc00e64993..304d46e5198 100644 --- a/service/codecommit/api_op_MergePullRequestByFastForward.go +++ b/service/codecommit/api_op_MergePullRequestByFastForward.go @@ -57,8 +57,7 @@ func (s *MergePullRequestByFastForwardInput) Validate() error { type MergePullRequestByFastForwardOutput struct { _ struct{} `type:"structure"` - // Information about the specified pull request, including information about - // the merge. + // Information about the specified pull request, including the merge. 
PullRequest *PullRequest `locationName:"pullRequest" type:"structure"` } diff --git a/service/codecommit/api_op_MergePullRequestBySquash.go b/service/codecommit/api_op_MergePullRequestBySquash.go index 8c02188a6a9..0efc6eca89c 100644 --- a/service/codecommit/api_op_MergePullRequestBySquash.go +++ b/service/codecommit/api_op_MergePullRequestBySquash.go @@ -12,37 +12,36 @@ import ( type MergePullRequestBySquashInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message to include in the commit information for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. 
ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The system-generated ID of the pull request. To get this ID, use ListPullRequests. diff --git a/service/codecommit/api_op_MergePullRequestByThreeWay.go b/service/codecommit/api_op_MergePullRequestByThreeWay.go index e01bf2a22d9..a0e8556843f 100644 --- a/service/codecommit/api_op_MergePullRequestByThreeWay.go +++ b/service/codecommit/api_op_MergePullRequestByThreeWay.go @@ -12,37 +12,36 @@ import ( type MergePullRequestByThreeWayInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. 
AuthorName *string `locationName:"authorName" type:"string"` // The commit message to include in the commit information for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. 
Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The system-generated ID of the pull request. To get this ID, use ListPullRequests. diff --git a/service/codecommit/api_op_OverridePullRequestApprovalRules.go b/service/codecommit/api_op_OverridePullRequestApprovalRules.go new file mode 100644 index 00000000000..ab1dd3509de --- /dev/null +++ b/service/codecommit/api_op_OverridePullRequestApprovalRules.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type OverridePullRequestApprovalRulesInput struct { + _ struct{} `type:"structure"` + + // Whether you want to set aside approval rule requirements for the pull request + // (OVERRIDE) or revoke a previous override and apply approval rule requirements + // (REVOKE). REVOKE status is not stored. + // + // OverrideStatus is a required field + OverrideStatus OverrideStatus `locationName:"overrideStatus" type:"string" required:"true" enum:"true"` + + // The system-generated ID of the pull request for which you want to override + // all approval rule requirements. To get this information, use GetPullRequest. 
+ // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID of the most recent revision of the pull request. + // You cannot override approval rules for anything but the most recent revision + // of a pull request. To get the revision ID, use GetPullRequest. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s OverridePullRequestApprovalRulesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OverridePullRequestApprovalRulesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "OverridePullRequestApprovalRulesInput"} + if len(s.OverrideStatus) == 0 { + invalidParams.Add(aws.NewErrParamRequired("OverrideStatus")) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type OverridePullRequestApprovalRulesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s OverridePullRequestApprovalRulesOutput) String() string { + return awsutil.Prettify(s) +} + +const opOverridePullRequestApprovalRules = "OverridePullRequestApprovalRules" + +// OverridePullRequestApprovalRulesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Sets aside (overrides) all approval rule requirements for a specified pull +// request. +// +// // Example sending a request using OverridePullRequestApprovalRulesRequest. 
+// req := client.OverridePullRequestApprovalRulesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/OverridePullRequestApprovalRules +func (c *Client) OverridePullRequestApprovalRulesRequest(input *OverridePullRequestApprovalRulesInput) OverridePullRequestApprovalRulesRequest { + op := &aws.Operation{ + Name: opOverridePullRequestApprovalRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &OverridePullRequestApprovalRulesInput{} + } + + req := c.newRequest(op, input, &OverridePullRequestApprovalRulesOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return OverridePullRequestApprovalRulesRequest{Request: req, Input: input, Copy: c.OverridePullRequestApprovalRulesRequest} +} + +// OverridePullRequestApprovalRulesRequest is the request type for the +// OverridePullRequestApprovalRules API operation. +type OverridePullRequestApprovalRulesRequest struct { + *aws.Request + Input *OverridePullRequestApprovalRulesInput + Copy func(*OverridePullRequestApprovalRulesInput) OverridePullRequestApprovalRulesRequest +} + +// Send marshals and sends the OverridePullRequestApprovalRules API request. +func (r OverridePullRequestApprovalRulesRequest) Send(ctx context.Context) (*OverridePullRequestApprovalRulesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &OverridePullRequestApprovalRulesResponse{ + OverridePullRequestApprovalRulesOutput: r.Request.Data.(*OverridePullRequestApprovalRulesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// OverridePullRequestApprovalRulesResponse is the response type for the +// OverridePullRequestApprovalRules API operation. 
+type OverridePullRequestApprovalRulesResponse struct { + *OverridePullRequestApprovalRulesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// OverridePullRequestApprovalRules request. +func (r *OverridePullRequestApprovalRulesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_PostCommentForComparedCommit.go b/service/codecommit/api_op_PostCommentForComparedCommit.go index 25f495f98a9..909ffc0377e 100644 --- a/service/codecommit/api_op_PostCommentForComparedCommit.go +++ b/service/codecommit/api_op_PostCommentForComparedCommit.go @@ -13,22 +13,20 @@ type PostCommentForComparedCommitInput struct { _ struct{} `type:"structure"` // To establish the directionality of the comparison, the full commit ID of - // the 'after' commit. + // the after commit. // // AfterCommitId is a required field AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` // To establish the directionality of the comparison, the full commit ID of - // the 'before' commit. - // - // This is required for commenting on any commit unless that commit is the initial - // commit. + // the before commit. Required for commenting on any commit unless that commit + // is the initial commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // The content of the comment you want to make. 
@@ -79,18 +77,16 @@ func (s *PostCommentForComparedCommitInput) Validate() error { type PostCommentForComparedCommitOutput struct { _ struct{} `type:"structure"` - // In the directionality you established, the blob ID of the 'after' blob. + // In the directionality you established, the blob ID of the after blob. AfterBlobId *string `locationName:"afterBlobId" type:"string"` - // In the directionality you established, the full commit ID of the 'after' - // commit. + // In the directionality you established, the full commit ID of the after commit. AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // In the directionality you established, the blob ID of the 'before' blob. + // In the directionality you established, the blob ID of the before blob. BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` - // In the directionality you established, the full commit ID of the 'before' - // commit. + // In the directionality you established, the full commit ID of the before commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` // The content of the comment you posted. diff --git a/service/codecommit/api_op_PostCommentForPullRequest.go b/service/codecommit/api_op_PostCommentForPullRequest.go index 5e88d1b28c4..c1f1734cbc5 100644 --- a/service/codecommit/api_op_PostCommentForPullRequest.go +++ b/service/codecommit/api_op_PostCommentForPullRequest.go @@ -24,10 +24,10 @@ type PostCommentForPullRequestInput struct { // BeforeCommitId is a required field BeforeCommitId *string `locationName:"beforeCommitId" type:"string" required:"true"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. 
If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // The content of your comment on the change. @@ -36,8 +36,8 @@ type PostCommentForPullRequestInput struct { Content *string `locationName:"content" type:"string" required:"true"` // The location of the change where you want to post your comment. If no location - // is provided, the comment will be posted as a general comment on the pull - // request difference between the before commit ID and the after commit ID. + // is provided, the comment is posted as a general comment on the pull request + // difference between the before commit ID and the after commit ID. Location *Location `locationName:"location" type:"structure"` // The system-generated ID of the pull request. To get this ID, use ListPullRequests. @@ -92,14 +92,14 @@ func (s *PostCommentForPullRequestInput) Validate() error { type PostCommentForPullRequestOutput struct { _ struct{} `type:"structure"` - // In the directionality of the pull request, the blob ID of the 'after' blob. + // In the directionality of the pull request, the blob ID of the after blob. AfterBlobId *string `locationName:"afterBlobId" type:"string"` // The full commit ID of the commit in the destination branch where the pull - // request will be merged. + // request is merged. AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // In the directionality of the pull request, the blob ID of the 'before' blob. + // In the directionality of the pull request, the blob ID of the before blob. 
BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` // The full commit ID of the commit in the source branch used to create the diff --git a/service/codecommit/api_op_PostCommentReply.go b/service/codecommit/api_op_PostCommentReply.go index dc516483805..cc2a16c644b 100644 --- a/service/codecommit/api_op_PostCommentReply.go +++ b/service/codecommit/api_op_PostCommentReply.go @@ -12,10 +12,10 @@ import ( type PostCommentReplyInput struct { _ struct{} `type:"structure"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // The contents of your reply to a comment. diff --git a/service/codecommit/api_op_PutFile.go b/service/codecommit/api_op_PutFile.go index cfff5a4a103..499510ba37f 100644 --- a/service/codecommit/api_op_PutFile.go +++ b/service/codecommit/api_op_PutFile.go @@ -13,14 +13,13 @@ type PutFileInput struct { _ struct{} `type:"structure"` // The name of the branch where you want to add or update the file. If this - // is an empty repository, this branch will be created. + // is an empty repository, this branch is created. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // A message about why this file was added or updated. While optional, adding - // a message is strongly encouraged in order to provide a more useful commit - // history for your repository. + // A message about why this file was added or updated. 
Although it is optional, + // a message makes the commit history for your repository more useful. CommitMessage *string `locationName:"commitMessage" type:"string"` // An email address for the person adding or updating the file. @@ -34,29 +33,28 @@ type PutFileInput struct { FileContent []byte `locationName:"fileContent" type:"blob" required:"true"` // The file mode permissions of the blob. Valid file mode permissions are listed - // below. + // here. FileMode FileModeTypeEnum `locationName:"fileMode" type:"string" enum:"true"` // The name of the file you want to add or update, including the relative path // to the file in the repository. // - // If the path does not currently exist in the repository, the path will be - // created as part of adding the file. + // If the path does not currently exist in the repository, the path is created + // as part of adding the file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` - // The name of the person adding or updating the file. While optional, adding - // a name is strongly encouraged in order to provide a more useful commit history - // for your repository. + // The name of the person adding or updating the file. Although it is optional, + // a name makes the commit history for your repository more useful. Name *string `locationName:"name" type:"string"` // The full commit ID of the head commit in the branch where you want to add // or update the file. If this is an empty repository, no commit ID is required. // If this is not an empty repository, a commit ID is required. // - // The commit ID must match the ID of the head commit at the time of the operation, - // or an error will occur, and the file will not be added or updated. + // The commit ID must match the ID of the head commit at the time of the operation. + // Otherwise, an error occurs, and the file is not added or updated. 
ParentCommitId *string `locationName:"parentCommitId" type:"string"` // The name of the repository where you want to add or update the file. @@ -110,7 +108,7 @@ type PutFileOutput struct { // BlobId is a required field BlobId *string `locationName:"blobId" type:"string" required:"true"` - // The full SHA of the commit that contains this file change. + // The full SHA ID of the commit that contains this file change. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` diff --git a/service/codecommit/api_op_PutRepositoryTriggers.go b/service/codecommit/api_op_PutRepositoryTriggers.go index a00aecc067b..87c11af35b7 100644 --- a/service/codecommit/api_op_PutRepositoryTriggers.go +++ b/service/codecommit/api_op_PutRepositoryTriggers.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Represents the input ofa put repository triggers operation. +// Represents the input of a put repository triggers operation. type PutRepositoryTriggersInput struct { _ struct{} `type:"structure"` @@ -76,8 +76,7 @@ const opPutRepositoryTriggers = "PutRepositoryTriggers" // PutRepositoryTriggersRequest returns a request value for making API operation for // AWS CodeCommit. // -// Replaces all triggers for a repository. This can be used to create or delete -// triggers. +// Replaces all triggers for a repository. Used to create or delete triggers. // // // Example sending a request using PutRepositoryTriggersRequest. 
// req := client.PutRepositoryTriggersRequest(params) diff --git a/service/codecommit/api_op_TestRepositoryTriggers.go b/service/codecommit/api_op_TestRepositoryTriggers.go index 7f25c565e36..832c7698650 100644 --- a/service/codecommit/api_op_TestRepositoryTriggers.go +++ b/service/codecommit/api_op_TestRepositoryTriggers.go @@ -62,8 +62,8 @@ func (s *TestRepositoryTriggersInput) Validate() error { type TestRepositoryTriggersOutput struct { _ struct{} `type:"structure"` - // The list of triggers that were not able to be tested. This list provides - // the names of the triggers that could not be tested, separated by commas. + // The list of triggers that were not tested. This list provides the names of + // the triggers that could not be tested, separated by commas. FailedExecutions []RepositoryTriggerExecutionFailure `locationName:"failedExecutions" type:"list"` // The list of triggers that were successfully tested. This list provides the @@ -83,8 +83,8 @@ const opTestRepositoryTriggers = "TestRepositoryTriggers" // // Tests the functionality of repository triggers by sending information to // the trigger target. If real data is available in the repository, the test -// will send data from the last commit. If no data is available, sample data -// will be generated. +// sends data from the last commit. If no data is available, sample data is +// generated. // // // Example sending a request using TestRepositoryTriggersRequest. // req := client.TestRepositoryTriggersRequest(params) diff --git a/service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go b/service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go new file mode 100644 index 00000000000..bad917f75e8 --- /dev/null +++ b/service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go @@ -0,0 +1,143 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateApprovalRuleTemplateContentInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template where you want to update the content + // of the rule. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The SHA-256 hash signature for the content of the approval rule. You can + // retrieve this information by using GetPullRequest. + ExistingRuleContentSha256 *string `locationName:"existingRuleContentSha256" type:"string"` + + // The content that replaces the existing content of the rule. Content statements + // must be complete. You cannot provide only the changes. + // + // NewRuleContent is a required field + NewRuleContent *string `locationName:"newRuleContent" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateContentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateApprovalRuleTemplateContentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateApprovalRuleTemplateContentInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.NewRuleContent == nil { + invalidParams.Add(aws.NewErrParamRequired("NewRuleContent")) + } + if s.NewRuleContent != nil && len(*s.NewRuleContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewRuleContent", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApprovalRuleTemplateContentOutput struct { + _ struct{} `type:"structure"` + + // Returns information about an approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateContentOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateApprovalRuleTemplateContent = "UpdateApprovalRuleTemplateContent" + +// UpdateApprovalRuleTemplateContentRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the content of an approval rule template. You can change the number +// of required approvals, the membership of the approval rule, and whether an +// approval pool is defined. +// +// // Example sending a request using UpdateApprovalRuleTemplateContentRequest. 
+// req := client.UpdateApprovalRuleTemplateContentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateApprovalRuleTemplateContent +func (c *Client) UpdateApprovalRuleTemplateContentRequest(input *UpdateApprovalRuleTemplateContentInput) UpdateApprovalRuleTemplateContentRequest { + op := &aws.Operation{ + Name: opUpdateApprovalRuleTemplateContent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApprovalRuleTemplateContentInput{} + } + + req := c.newRequest(op, input, &UpdateApprovalRuleTemplateContentOutput{}) + return UpdateApprovalRuleTemplateContentRequest{Request: req, Input: input, Copy: c.UpdateApprovalRuleTemplateContentRequest} +} + +// UpdateApprovalRuleTemplateContentRequest is the request type for the +// UpdateApprovalRuleTemplateContent API operation. +type UpdateApprovalRuleTemplateContentRequest struct { + *aws.Request + Input *UpdateApprovalRuleTemplateContentInput + Copy func(*UpdateApprovalRuleTemplateContentInput) UpdateApprovalRuleTemplateContentRequest +} + +// Send marshals and sends the UpdateApprovalRuleTemplateContent API request. +func (r UpdateApprovalRuleTemplateContentRequest) Send(ctx context.Context) (*UpdateApprovalRuleTemplateContentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateApprovalRuleTemplateContentResponse{ + UpdateApprovalRuleTemplateContentOutput: r.Request.Data.(*UpdateApprovalRuleTemplateContentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateApprovalRuleTemplateContentResponse is the response type for the +// UpdateApprovalRuleTemplateContent API operation. 
+type UpdateApprovalRuleTemplateContentResponse struct { + *UpdateApprovalRuleTemplateContentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateApprovalRuleTemplateContent request. +func (r *UpdateApprovalRuleTemplateContentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go b/service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go new file mode 100644 index 00000000000..dc1e4be8cec --- /dev/null +++ b/service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateApprovalRuleTemplateDescriptionInput struct { + _ struct{} `type:"structure"` + + // The updated description of the approval rule template. + // + // ApprovalRuleTemplateDescription is a required field + ApprovalRuleTemplateDescription *string `locationName:"approvalRuleTemplateDescription" type:"string" required:"true"` + + // The name of the template for which you want to update the description. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateApprovalRuleTemplateDescriptionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateApprovalRuleTemplateDescriptionInput"} + + if s.ApprovalRuleTemplateDescription == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateDescription")) + } + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApprovalRuleTemplateDescriptionOutput struct { + _ struct{} `type:"structure"` + + // The structure and content of the updated approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateApprovalRuleTemplateDescription = "UpdateApprovalRuleTemplateDescription" + +// UpdateApprovalRuleTemplateDescriptionRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the description for a specified approval rule template. +// +// // Example sending a request using UpdateApprovalRuleTemplateDescriptionRequest. 
+// req := client.UpdateApprovalRuleTemplateDescriptionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateApprovalRuleTemplateDescription +func (c *Client) UpdateApprovalRuleTemplateDescriptionRequest(input *UpdateApprovalRuleTemplateDescriptionInput) UpdateApprovalRuleTemplateDescriptionRequest { + op := &aws.Operation{ + Name: opUpdateApprovalRuleTemplateDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApprovalRuleTemplateDescriptionInput{} + } + + req := c.newRequest(op, input, &UpdateApprovalRuleTemplateDescriptionOutput{}) + return UpdateApprovalRuleTemplateDescriptionRequest{Request: req, Input: input, Copy: c.UpdateApprovalRuleTemplateDescriptionRequest} +} + +// UpdateApprovalRuleTemplateDescriptionRequest is the request type for the +// UpdateApprovalRuleTemplateDescription API operation. +type UpdateApprovalRuleTemplateDescriptionRequest struct { + *aws.Request + Input *UpdateApprovalRuleTemplateDescriptionInput + Copy func(*UpdateApprovalRuleTemplateDescriptionInput) UpdateApprovalRuleTemplateDescriptionRequest +} + +// Send marshals and sends the UpdateApprovalRuleTemplateDescription API request. +func (r UpdateApprovalRuleTemplateDescriptionRequest) Send(ctx context.Context) (*UpdateApprovalRuleTemplateDescriptionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateApprovalRuleTemplateDescriptionResponse{ + UpdateApprovalRuleTemplateDescriptionOutput: r.Request.Data.(*UpdateApprovalRuleTemplateDescriptionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateApprovalRuleTemplateDescriptionResponse is the response type for the +// UpdateApprovalRuleTemplateDescription API operation. 
+type UpdateApprovalRuleTemplateDescriptionResponse struct { + *UpdateApprovalRuleTemplateDescriptionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateApprovalRuleTemplateDescription request. +func (r *UpdateApprovalRuleTemplateDescriptionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdateApprovalRuleTemplateName.go b/service/codecommit/api_op_UpdateApprovalRuleTemplateName.go new file mode 100644 index 00000000000..618c2c24703 --- /dev/null +++ b/service/codecommit/api_op_UpdateApprovalRuleTemplateName.go @@ -0,0 +1,135 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateApprovalRuleTemplateNameInput struct { + _ struct{} `type:"structure"` + + // The new name you want to apply to the approval rule template. + // + // NewApprovalRuleTemplateName is a required field + NewApprovalRuleTemplateName *string `locationName:"newApprovalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The current name of the approval rule template. + // + // OldApprovalRuleTemplateName is a required field + OldApprovalRuleTemplateName *string `locationName:"oldApprovalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateNameInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateApprovalRuleTemplateNameInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateApprovalRuleTemplateNameInput"} + + if s.NewApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("NewApprovalRuleTemplateName")) + } + if s.NewApprovalRuleTemplateName != nil && len(*s.NewApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewApprovalRuleTemplateName", 1)) + } + + if s.OldApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("OldApprovalRuleTemplateName")) + } + if s.OldApprovalRuleTemplateName != nil && len(*s.OldApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OldApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApprovalRuleTemplateNameOutput struct { + _ struct{} `type:"structure"` + + // The structure and content of the updated approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateNameOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateApprovalRuleTemplateName = "UpdateApprovalRuleTemplateName" + +// UpdateApprovalRuleTemplateNameRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the name of a specified approval rule template. +// +// // Example sending a request using UpdateApprovalRuleTemplateNameRequest. 
+// req := client.UpdateApprovalRuleTemplateNameRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateApprovalRuleTemplateName +func (c *Client) UpdateApprovalRuleTemplateNameRequest(input *UpdateApprovalRuleTemplateNameInput) UpdateApprovalRuleTemplateNameRequest { + op := &aws.Operation{ + Name: opUpdateApprovalRuleTemplateName, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApprovalRuleTemplateNameInput{} + } + + req := c.newRequest(op, input, &UpdateApprovalRuleTemplateNameOutput{}) + return UpdateApprovalRuleTemplateNameRequest{Request: req, Input: input, Copy: c.UpdateApprovalRuleTemplateNameRequest} +} + +// UpdateApprovalRuleTemplateNameRequest is the request type for the +// UpdateApprovalRuleTemplateName API operation. +type UpdateApprovalRuleTemplateNameRequest struct { + *aws.Request + Input *UpdateApprovalRuleTemplateNameInput + Copy func(*UpdateApprovalRuleTemplateNameInput) UpdateApprovalRuleTemplateNameRequest +} + +// Send marshals and sends the UpdateApprovalRuleTemplateName API request. +func (r UpdateApprovalRuleTemplateNameRequest) Send(ctx context.Context) (*UpdateApprovalRuleTemplateNameResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateApprovalRuleTemplateNameResponse{ + UpdateApprovalRuleTemplateNameOutput: r.Request.Data.(*UpdateApprovalRuleTemplateNameOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateApprovalRuleTemplateNameResponse is the response type for the +// UpdateApprovalRuleTemplateName API operation. +type UpdateApprovalRuleTemplateNameResponse struct { + *UpdateApprovalRuleTemplateNameOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateApprovalRuleTemplateName request. 
+func (r *UpdateApprovalRuleTemplateNameResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdateComment.go b/service/codecommit/api_op_UpdateComment.go index d6aba2b030d..24782e5a506 100644 --- a/service/codecommit/api_op_UpdateComment.go +++ b/service/codecommit/api_op_UpdateComment.go @@ -18,8 +18,7 @@ type UpdateCommentInput struct { // CommentId is a required field CommentId *string `locationName:"commentId" type:"string" required:"true"` - // The updated content with which you want to replace the existing content of - // the comment. + // The updated content to replace the existing content of the comment. // // Content is a required field Content *string `locationName:"content" type:"string" required:"true"` diff --git a/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go b/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go new file mode 100644 index 00000000000..3f62779d784 --- /dev/null +++ b/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go @@ -0,0 +1,172 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdatePullRequestApprovalRuleContentInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule you want to update. + // + // ApprovalRuleName is a required field + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string" required:"true"` + + // The SHA-256 hash signature for the content of the approval rule. You can + // retrieve this information by using GetPullRequest. + ExistingRuleContentSha256 *string `locationName:"existingRuleContentSha256" type:"string"` + + // The updated content for the approval rule. 
+ // + // When you update the content of the approval rule, you can specify approvers + // in an approval pool in one of two ways: + // + // * CodeCommitApprovers: This option only requires an AWS account and a + // resource. It can be used for both IAM users and federated access users + // whose name matches the provided resource name. This is a very powerful + // option that offers a great deal of flexibility. For example, if you specify + // the AWS account 123456789012 and Mary_Major, all of the following are + // counted as approvals coming from that user: An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified + // in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + // This option does not recognize an active session of someone assuming the + // role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) + // unless you include a wildcard (*Mary_Major). + // + // * Fully qualified ARN: This option allows you to specify the fully qualified + // Amazon Resource Name (ARN) of the IAM user or role. + // + // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers + // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // NewRuleContent is a required field + NewRuleContent *string `locationName:"newRuleContent" min:"1" type:"string" required:"true"` + + // The system-generated ID of the pull request. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalRuleContentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdatePullRequestApprovalRuleContentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdatePullRequestApprovalRuleContentInput"} + + if s.ApprovalRuleName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleName")) + } + if s.ApprovalRuleName != nil && len(*s.ApprovalRuleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleName", 1)) + } + + if s.NewRuleContent == nil { + invalidParams.Add(aws.NewErrParamRequired("NewRuleContent")) + } + if s.NewRuleContent != nil && len(*s.NewRuleContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewRuleContent", 1)) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdatePullRequestApprovalRuleContentOutput struct { + _ struct{} `type:"structure"` + + // Information about the updated approval rule. + // + // ApprovalRule is a required field + ApprovalRule *ApprovalRule `locationName:"approvalRule" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalRuleContentOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdatePullRequestApprovalRuleContent = "UpdatePullRequestApprovalRuleContent" + +// UpdatePullRequestApprovalRuleContentRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the structure of an approval rule created specifically for a pull +// request. For example, you can change the number of required approvers and +// the approval pool for approvers. +// +// // Example sending a request using UpdatePullRequestApprovalRuleContentRequest. 
+// req := client.UpdatePullRequestApprovalRuleContentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestApprovalRuleContent +func (c *Client) UpdatePullRequestApprovalRuleContentRequest(input *UpdatePullRequestApprovalRuleContentInput) UpdatePullRequestApprovalRuleContentRequest { + op := &aws.Operation{ + Name: opUpdatePullRequestApprovalRuleContent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePullRequestApprovalRuleContentInput{} + } + + req := c.newRequest(op, input, &UpdatePullRequestApprovalRuleContentOutput{}) + return UpdatePullRequestApprovalRuleContentRequest{Request: req, Input: input, Copy: c.UpdatePullRequestApprovalRuleContentRequest} +} + +// UpdatePullRequestApprovalRuleContentRequest is the request type for the +// UpdatePullRequestApprovalRuleContent API operation. +type UpdatePullRequestApprovalRuleContentRequest struct { + *aws.Request + Input *UpdatePullRequestApprovalRuleContentInput + Copy func(*UpdatePullRequestApprovalRuleContentInput) UpdatePullRequestApprovalRuleContentRequest +} + +// Send marshals and sends the UpdatePullRequestApprovalRuleContent API request. +func (r UpdatePullRequestApprovalRuleContentRequest) Send(ctx context.Context) (*UpdatePullRequestApprovalRuleContentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdatePullRequestApprovalRuleContentResponse{ + UpdatePullRequestApprovalRuleContentOutput: r.Request.Data.(*UpdatePullRequestApprovalRuleContentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdatePullRequestApprovalRuleContentResponse is the response type for the +// UpdatePullRequestApprovalRuleContent API operation. 
+type UpdatePullRequestApprovalRuleContentResponse struct { + *UpdatePullRequestApprovalRuleContentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdatePullRequestApprovalRuleContent request. +func (r *UpdatePullRequestApprovalRuleContentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdatePullRequestApprovalState.go b/service/codecommit/api_op_UpdatePullRequestApprovalState.go new file mode 100644 index 00000000000..7051fcd86bc --- /dev/null +++ b/service/codecommit/api_op_UpdatePullRequestApprovalState.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type UpdatePullRequestApprovalStateInput struct { + _ struct{} `type:"structure"` + + // The approval state to associate with the user on the pull request. + // + // ApprovalState is a required field + ApprovalState ApprovalState `locationName:"approvalState" type:"string" required:"true" enum:"true"` + + // The system-generated ID of the pull request. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID of the revision. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalStateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdatePullRequestApprovalStateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdatePullRequestApprovalStateInput"} + if len(s.ApprovalState) == 0 { + invalidParams.Add(aws.NewErrParamRequired("ApprovalState")) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdatePullRequestApprovalStateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalStateOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdatePullRequestApprovalState = "UpdatePullRequestApprovalState" + +// UpdatePullRequestApprovalStateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the state of a user's approval on a pull request. The user is derived +// from the signed-in account when the request is made. +// +// // Example sending a request using UpdatePullRequestApprovalStateRequest. 
+// req := client.UpdatePullRequestApprovalStateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestApprovalState +func (c *Client) UpdatePullRequestApprovalStateRequest(input *UpdatePullRequestApprovalStateInput) UpdatePullRequestApprovalStateRequest { + op := &aws.Operation{ + Name: opUpdatePullRequestApprovalState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePullRequestApprovalStateInput{} + } + + req := c.newRequest(op, input, &UpdatePullRequestApprovalStateOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdatePullRequestApprovalStateRequest{Request: req, Input: input, Copy: c.UpdatePullRequestApprovalStateRequest} +} + +// UpdatePullRequestApprovalStateRequest is the request type for the +// UpdatePullRequestApprovalState API operation. +type UpdatePullRequestApprovalStateRequest struct { + *aws.Request + Input *UpdatePullRequestApprovalStateInput + Copy func(*UpdatePullRequestApprovalStateInput) UpdatePullRequestApprovalStateRequest +} + +// Send marshals and sends the UpdatePullRequestApprovalState API request. +func (r UpdatePullRequestApprovalStateRequest) Send(ctx context.Context) (*UpdatePullRequestApprovalStateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdatePullRequestApprovalStateResponse{ + UpdatePullRequestApprovalStateOutput: r.Request.Data.(*UpdatePullRequestApprovalStateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdatePullRequestApprovalStateResponse is the response type for the +// UpdatePullRequestApprovalState API operation. 
+type UpdatePullRequestApprovalStateResponse struct { + *UpdatePullRequestApprovalStateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdatePullRequestApprovalState request. +func (r *UpdatePullRequestApprovalStateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdatePullRequestDescription.go b/service/codecommit/api_op_UpdatePullRequestDescription.go index eee85ee6914..c9632af6031 100644 --- a/service/codecommit/api_op_UpdatePullRequestDescription.go +++ b/service/codecommit/api_op_UpdatePullRequestDescription.go @@ -13,7 +13,7 @@ type UpdatePullRequestDescriptionInput struct { _ struct{} `type:"structure"` // The updated content of the description for the pull request. This content - // will replace the existing description. + // replaces the existing description. // // Description is a required field Description *string `locationName:"description" type:"string" required:"true"` diff --git a/service/codecommit/api_op_UpdatePullRequestStatus.go b/service/codecommit/api_op_UpdatePullRequestStatus.go index 280171b3de6..0b510b0b441 100644 --- a/service/codecommit/api_op_UpdatePullRequestStatus.go +++ b/service/codecommit/api_op_UpdatePullRequestStatus.go @@ -18,7 +18,7 @@ type UpdatePullRequestStatusInput struct { PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` // The status of the pull request. The only valid operations are to update the - // status from OPEN to OPEN, OPEN to CLOSED or from from CLOSED to CLOSED. + // status from OPEN to OPEN, OPEN to CLOSED or from CLOSED to CLOSED. 
// // PullRequestStatus is a required field PullRequestStatus PullRequestStatusEnum `locationName:"pullRequestStatus" type:"string" required:"true" enum:"true"` diff --git a/service/codecommit/api_op_UpdatePullRequestTitle.go b/service/codecommit/api_op_UpdatePullRequestTitle.go index d3e31f2cd81..c1f705346ec 100644 --- a/service/codecommit/api_op_UpdatePullRequestTitle.go +++ b/service/codecommit/api_op_UpdatePullRequestTitle.go @@ -17,7 +17,7 @@ type UpdatePullRequestTitleInput struct { // PullRequestId is a required field PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` - // The updated title of the pull request. This will replace the existing title. + // The updated title of the pull request. This replaces the existing title. // // Title is a required field Title *string `locationName:"title" type:"string" required:"true"` diff --git a/service/codecommit/api_op_UpdateRepositoryDescription.go b/service/codecommit/api_op_UpdateRepositoryDescription.go index 05180c2e026..f3855b39a45 100644 --- a/service/codecommit/api_op_UpdateRepositoryDescription.go +++ b/service/codecommit/api_op_UpdateRepositoryDescription.go @@ -65,9 +65,9 @@ const opUpdateRepositoryDescription = "UpdateRepositoryDescription" // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// and display it in a webpage can expose users to potentially malicious code. +// Make sure that you HTML-encode the description field in any application that +// uses this API to display the repository description on a webpage. // // // Example sending a request using UpdateRepositoryDescriptionRequest. 
// req := client.UpdateRepositoryDescriptionRequest(params) diff --git a/service/codecommit/api_op_UpdateRepositoryName.go b/service/codecommit/api_op_UpdateRepositoryName.go index 5218f0f8298..cbcc5f598dc 100644 --- a/service/codecommit/api_op_UpdateRepositoryName.go +++ b/service/codecommit/api_op_UpdateRepositoryName.go @@ -20,7 +20,7 @@ type UpdateRepositoryNameInput struct { // NewName is a required field NewName *string `locationName:"newName" min:"1" type:"string" required:"true"` - // The existing name of the repository. + // The current name of the repository. // // OldName is a required field OldName *string `locationName:"oldName" min:"1" type:"string" required:"true"` @@ -70,10 +70,10 @@ const opUpdateRepositoryName = "UpdateRepositoryName" // AWS CodeCommit. // // Renames a repository. The repository name must be unique across the calling -// AWS account. In addition, repository names are limited to 100 alphanumeric, -// dash, and underscore characters, and cannot include certain characters. The -// suffix ".git" is prohibited. For a full description of the limits on repository -// names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) +// AWS account. Repository names are limited to 100 alphanumeric, dash, and +// underscore characters, and cannot include certain characters. The suffix +// .git is prohibited. For more information about the limits on repository names, +// see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) // in the AWS CodeCommit User Guide. // // // Example sending a request using UpdateRepositoryNameRequest. diff --git a/service/codecommit/api_types.go b/service/codecommit/api_types.go index 33b19ea96f0..0852409858f 100644 --- a/service/codecommit/api_types.go +++ b/service/codecommit/api_types.go @@ -13,7 +13,168 @@ import ( var _ aws.Config var _ = awsutil.Prettify -// Information about errors in a BatchDescribeMergeConflicts operation. 
+// Returns information about a specific approval on a pull request. +type Approval struct { + _ struct{} `type:"structure"` + + // The state of the approval, APPROVE or REVOKE. REVOKE states are not stored. + ApprovalState ApprovalState `locationName:"approvalState" type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the user. + UserArn *string `locationName:"userArn" type:"string"` +} + +// String returns the string representation +func (s Approval) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an approval rule. +type ApprovalRule struct { + _ struct{} `type:"structure"` + + // The content of the approval rule. + ApprovalRuleContent *string `locationName:"approvalRuleContent" min:"1" type:"string"` + + // The system-generated ID of the approval rule. + ApprovalRuleId *string `locationName:"approvalRuleId" type:"string"` + + // The name of the approval rule. + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string"` + + // The date the approval rule was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date the approval rule was most recently changed, in timestamp format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the user who made the most recent changes + // to the approval rule. + LastModifiedUser *string `locationName:"lastModifiedUser" type:"string"` + + // The approval rule template used to create the rule. + OriginApprovalRuleTemplate *OriginApprovalRuleTemplate `locationName:"originApprovalRuleTemplate" type:"structure"` + + // The SHA-256 hash signature for the content of the approval rule. 
+ RuleContentSha256 *string `locationName:"ruleContentSha256" type:"string"` +} + +// String returns the string representation +func (s ApprovalRule) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an event for an approval rule. +type ApprovalRuleEventMetadata struct { + _ struct{} `type:"structure"` + + // The content of the approval rule. + ApprovalRuleContent *string `locationName:"approvalRuleContent" min:"1" type:"string"` + + // The system-generated ID of the approval rule. + ApprovalRuleId *string `locationName:"approvalRuleId" type:"string"` + + // The name of the approval rule. + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string"` +} + +// String returns the string representation +func (s ApprovalRuleEventMetadata) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an override event for approval rules for a pull +// request. +type ApprovalRuleOverriddenEventMetadata struct { + _ struct{} `type:"structure"` + + // The status of the override event. + OverrideStatus OverrideStatus `locationName:"overrideStatus" type:"string" enum:"true"` + + // The revision ID of the pull request when the override event occurred. + RevisionId *string `locationName:"revisionId" type:"string"` +} + +// String returns the string representation +func (s ApprovalRuleOverriddenEventMetadata) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an approval rule template. +type ApprovalRuleTemplate struct { + _ struct{} `type:"structure"` + + // The content of the approval rule template. + ApprovalRuleTemplateContent *string `locationName:"approvalRuleTemplateContent" min:"1" type:"string"` + + // The description of the approval rule template. + ApprovalRuleTemplateDescription *string `locationName:"approvalRuleTemplateDescription" type:"string"` + + // The system-generated ID of the approval rule template. 
+ ApprovalRuleTemplateId *string `locationName:"approvalRuleTemplateId" type:"string"` + + // The name of the approval rule template. + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string"` + + // The date the approval rule template was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date the approval rule template was most recently changed, in timestamp + // format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the user who made the most recent changes + // to the approval rule template. + LastModifiedUser *string `locationName:"lastModifiedUser" type:"string"` + + // The SHA-256 hash signature for the content of the approval rule template. + RuleContentSha256 *string `locationName:"ruleContentSha256" type:"string"` +} + +// String returns the string representation +func (s ApprovalRuleTemplate) String() string { + return awsutil.Prettify(s) +} + +// Returns information about a change in the approval state for a pull request. +type ApprovalStateChangedEventMetadata struct { + _ struct{} `type:"structure"` + + // The approval status for the pull request. + ApprovalStatus ApprovalState `locationName:"approvalStatus" type:"string" enum:"true"` + + // The revision ID of the pull request when the approval state changed. + RevisionId *string `locationName:"revisionId" type:"string"` +} + +// String returns the string representation +func (s ApprovalStateChangedEventMetadata) String() string { + return awsutil.Prettify(s) +} + +// Returns information about errors in a BatchAssociateApprovalRuleTemplateWithRepositories +// operation. +type BatchAssociateApprovalRuleTemplateWithRepositoriesError struct { + _ struct{} `type:"structure"` + + // An error code that specifies whether the repository name was not valid or + // not found. 
+ ErrorCode *string `locationName:"errorCode" type:"string"` + + // An error message that provides details about why the repository name was + // not found or not valid. + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // The name of the repository where the association was not made. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s BatchAssociateApprovalRuleTemplateWithRepositoriesError) String() string { + return awsutil.Prettify(s) +} + +// Returns information about errors in a BatchDescribeMergeConflicts operation. type BatchDescribeMergeConflictsError struct { _ struct{} `type:"structure"` @@ -38,6 +199,29 @@ func (s BatchDescribeMergeConflictsError) String() string { return awsutil.Prettify(s) } +// Returns information about errors in a BatchDisassociateApprovalRuleTemplateFromRepositories +// operation. +type BatchDisassociateApprovalRuleTemplateFromRepositoriesError struct { + _ struct{} `type:"structure"` + + // An error code that specifies whether the repository name was not valid or + // not found. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // An error message that provides details about why the repository name was + // either not found or not valid. + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // The name of the repository where the association with the template was not + // able to be removed. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s BatchDisassociateApprovalRuleTemplateFromRepositoriesError) String() string { + return awsutil.Prettify(s) +} + // Returns information about errors in a BatchGetCommits operation. 
type BatchGetCommitsError struct { _ struct{} `type:"structure"` @@ -76,7 +260,7 @@ type BlobMetadata struct { // * 120000 indicates a symlink Mode *string `locationName:"mode" type:"string"` - // The path to the blob and any associated file name, if any. + // The path to the blob and associated file name, if any. Path *string `locationName:"path" type:"string"` } @@ -108,10 +292,10 @@ type Comment struct { // The Amazon Resource Name (ARN) of the person who posted the comment. AuthorArn *string `locationName:"authorArn" type:"string"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` // The system-generated comment ID. @@ -142,16 +326,16 @@ func (s Comment) String() string { type CommentsForComparedCommit struct { _ struct{} `type:"structure"` - // The full blob ID of the commit used to establish the 'after' of the comparison. + // The full blob ID of the commit used to establish the after of the comparison. AfterBlobId *string `locationName:"afterBlobId" type:"string"` - // The full commit ID of the commit used to establish the 'after' of the comparison. + // The full commit ID of the commit used to establish the after of the comparison. AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // The full blob ID of the commit used to establish the 'before' of the comparison. + // The full blob ID of the commit used to establish the before of the comparison. 
BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` - // The full commit ID of the commit used to establish the 'before' of the comparison. + // The full commit ID of the commit used to establish the before of the comparison. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` // An array of comment objects. Each comment object contains information about @@ -160,7 +344,7 @@ type CommentsForComparedCommit struct { // Location information about the comment on the comparison, including the file // name, line number, and whether the version of the file where the comment - // was made is 'BEFORE' or 'AFTER'. + // was made is BEFORE or AFTER. Location *Location `locationName:"location" type:"structure"` // The name of the repository that contains the compared commits. @@ -179,7 +363,7 @@ type CommentsForPullRequest struct { // The full blob ID of the file on which you want to comment on the source commit. AfterBlobId *string `locationName:"afterBlobId" type:"string"` - // he full commit ID of the commit that was the tip of the source branch at + // The full commit ID of the commit that was the tip of the source branch at // the time the comment was made. AfterCommitId *string `locationName:"afterCommitId" type:"string"` @@ -188,9 +372,9 @@ type CommentsForPullRequest struct { BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` // The full commit ID of the commit that was the tip of the destination branch - // when the pull request was created. This commit will be superceded by the - // after commit in the source branch when and if you merge the source branch - // into the destination branch. + // when the pull request was created. This commit is superceded by the after + // commit in the source branch when and if you merge the source branch into + // the destination branch. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` // An array of comment objects. 
Each comment object contains information about @@ -199,7 +383,7 @@ type CommentsForPullRequest struct { // Location information about the comment on the pull request, including the // file name, line number, and whether the version of the file where the comment - // was made is 'BEFORE' (destination branch) or 'AFTER' (source branch). + // was made is BEFORE (destination branch) or AFTER (source branch). Location *Location `locationName:"location" type:"structure"` // The system-generated ID of the pull request. @@ -218,7 +402,7 @@ func (s CommentsForPullRequest) String() string { type Commit struct { _ struct{} `type:"structure"` - // Any additional data associated with the specified commit. + // Any other data associated with the specified commit. AdditionalData *string `locationName:"additionalData" type:"string"` // Information about the author of the specified commit. Information includes @@ -226,7 +410,7 @@ type Commit struct { // the email address for the author, as configured in Git. Author *UserInfo `locationName:"author" type:"structure"` - // The full SHA of the specified commit. + // The full SHA ID of the specified commit. CommitId *string `locationName:"commitId" type:"string"` // Information about the person who committed the specified commit, also known @@ -317,18 +501,18 @@ func (s ConflictMetadata) String() string { return awsutil.Prettify(s) } -// A list of inputs to use when resolving conflicts during a merge if AUTOMERGE -// is chosen as the conflict resolution strategy. +// If AUTOMERGE is the conflict resolution strategy, a list of inputs to use +// when resolving conflicts during a merge. type ConflictResolution struct { _ struct{} `type:"structure"` - // Files that will be deleted as part of the merge conflict resolution. + // Files to be deleted as part of the merge conflict resolution. DeleteFiles []DeleteFileEntry `locationName:"deleteFiles" type:"list"` - // Files that will have content replaced as part of the merge conflict resolution. 
+ // Files to have content replaced as part of the merge conflict resolution. ReplaceContents []ReplaceContentEntry `locationName:"replaceContents" type:"list"` - // File modes that will be set as part of the merge conflict resolution. + // File modes that are set as part of the merge conflict resolution. SetFileModes []SetFileModeEntry `locationName:"setFileModes" type:"list"` } @@ -368,12 +552,11 @@ func (s *ConflictResolution) Validate() error { return nil } -// A file that will be deleted as part of a commit. +// A file that is deleted as part of a commit. type DeleteFileEntry struct { _ struct{} `type:"structure"` - // The full path of the file that will be deleted, including the name of the - // file. + // The full path of the file to be deleted, including the name of the file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` @@ -420,11 +603,35 @@ func (s Difference) String() string { return awsutil.Prettify(s) } +// Returns information about the approval rules applied to a pull request and +// whether conditions have been met. +type Evaluation struct { + _ struct{} `type:"structure"` + + // The names of the approval rules that have not had their conditions met. + ApprovalRulesNotSatisfied []string `locationName:"approvalRulesNotSatisfied" type:"list"` + + // The names of the approval rules that have had their conditions met. + ApprovalRulesSatisfied []string `locationName:"approvalRulesSatisfied" type:"list"` + + // Whether the state of the pull request is approved. + Approved *bool `locationName:"approved" type:"boolean"` + + // Whether the approval rule requirements for the pull request have been overridden + // and no longer need to be met. + Overridden *bool `locationName:"overridden" type:"boolean"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + // Returns information about a file in a repository. 
type File struct { _ struct{} `type:"structure"` - // The fully-qualified path to the file in the repository. + // The fully qualified path to the file in the repository. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The blob ID that contains the file information. @@ -443,12 +650,12 @@ func (s File) String() string { return awsutil.Prettify(s) } -// A file that will be added, updated, or deleted as part of a commit. +// A file to be added, updated, or deleted as part of a commit. type FileMetadata struct { _ struct{} `type:"structure"` - // The full path to the file that will be added or updated, including the name - // of the file. + // The full path to the file to be added or updated, including the name of the + // file. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The blob ID that contains the file information. @@ -506,7 +713,7 @@ func (s FileSizes) String() string { type Folder struct { _ struct{} `type:"structure"` - // The fully-qualified path of the folder in the repository. + // The fully qualified path of the folder in the repository. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The relative path of the specified folder from the folder where the query @@ -555,11 +762,11 @@ type Location struct { // if any. FilePath *string `locationName:"filePath" type:"string"` - // The position of a change within a compared file, in line number format. + // The position of a change in a compared file, in line number format. FilePosition *int64 `locationName:"filePosition" type:"long"` // In a comparison of commits or a pull request, whether the change is in the - // 'before' or 'after' of that comparison. + // before or after of that comparison. RelativeFileVersion RelativeFileVersionEnum `locationName:"relativeFileVersion" type:"string" enum:"true"` } @@ -581,9 +788,9 @@ type MergeHunk struct { // A Boolean value indicating whether a combination of hunks contains a conflict. 
// Conflicts occur when the same file or the same lines in a file were modified // in both the source and destination of a merge or pull request. Valid values - // include true, false, and null. This will be true when the hunk represents - // a conflict and one or more files contains a line conflict. File mode conflicts - // in a merge will not set this to be true. + // include true, false, and null. True when the hunk represents a conflict and + // one or more files contains a line conflict. File mode conflicts in a merge + // do not set this to true. IsConflict *bool `locationName:"isConflict" type:"boolean"` // Information about the merge hunk in the source of a merge or pull request. @@ -603,8 +810,8 @@ type MergeHunkDetail struct { // The end position of the hunk in the merge result. EndLine *int64 `locationName:"endLine" type:"integer"` - // The base-64 encoded content of the hunk merged region that might or might - // not contain a conflict. + // The base-64 encoded content of the hunk merged region that might contain + // a conflict. HunkContent *string `locationName:"hunkContent" type:"string"` // The start position of the hunk in the merge result. @@ -646,8 +853,8 @@ type MergeOperations struct { // The operation on a file in the destination of a merge or pull request. Destination ChangeTypeEnum `locationName:"destination" type:"string" enum:"true"` - // The operation on a file (add, modify, or delete) of a file in the source - // of a merge or pull request. + // The operation (add, modify, or delete) on a file in the source of a merge + // or pull request. Source ChangeTypeEnum `locationName:"source" type:"string" enum:"true"` } @@ -675,17 +882,37 @@ func (s ObjectTypes) String() string { return awsutil.Prettify(s) } +// Returns information about the template that created the approval rule for +// a pull request. +type OriginApprovalRuleTemplate struct { + _ struct{} `type:"structure"` + + // The ID of the template that created the approval rule. 
+ ApprovalRuleTemplateId *string `locationName:"approvalRuleTemplateId" type:"string"` + + // The name of the template that created the approval rule. + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string"` +} + +// String returns the string representation +func (s OriginApprovalRuleTemplate) String() string { + return awsutil.Prettify(s) +} + // Returns information about a pull request. type PullRequest struct { _ struct{} `type:"structure"` + // The approval rules applied to the pull request. + ApprovalRules []ApprovalRule `locationName:"approvalRules" type:"list"` + // The Amazon Resource Name (ARN) of the user who created the pull request. AuthorArn *string `locationName:"authorArn" type:"string"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` // The date and time the pull request was originally created, in timestamp format. @@ -710,8 +937,11 @@ type PullRequest struct { // branch for the pull request. PullRequestTargets []PullRequestTarget `locationName:"pullRequestTargets" type:"list"` + // The system-generated revision ID for the pull request. + RevisionId *string `locationName:"revisionId" type:"string"` + // The user-defined title of the pull request. This title is displayed in the - // list of pull requests to other users of the repository. + // list of pull requests to other repository users. 
Title *string `locationName:"title" type:"string"` } @@ -750,18 +980,27 @@ type PullRequestEvent struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the user whose actions resulted in the - // event. Examples include updating the pull request with additional commits - // or changing the status of a pull request. + // event. Examples include updating the pull request with more commits or changing + // the status of a pull request. ActorArn *string `locationName:"actorArn" type:"string"` + // Information about a pull request event. + ApprovalRuleEventMetadata *ApprovalRuleEventMetadata `locationName:"approvalRuleEventMetadata" type:"structure"` + + // Information about an approval rule override event for a pull request. + ApprovalRuleOverriddenEventMetadata *ApprovalRuleOverriddenEventMetadata `locationName:"approvalRuleOverriddenEventMetadata" type:"structure"` + + // Information about an approval state change for a pull request. + ApprovalStateChangedEventMetadata *ApprovalStateChangedEventMetadata `locationName:"approvalStateChangedEventMetadata" type:"structure"` + // The day and time of the pull request event, in timestamp format. EventDate *time.Time `locationName:"eventDate" type:"timestamp"` // Information about the source and destination branches for the pull request. PullRequestCreatedEventMetadata *PullRequestCreatedEventMetadata `locationName:"pullRequestCreatedEventMetadata" type:"structure"` - // The type of the pull request event, for example a status change event (PULL_REQUEST_STATUS_CHANGED) - // or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED). + // The type of the pull request event (for example, a status change event (PULL_REQUEST_STATUS_CHANGED) + // or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED)). PullRequestEventType PullRequestEventType `locationName:"pullRequestEventType" type:"string" enum:"true"` // The system-generated ID of the pull request. 
@@ -787,7 +1026,7 @@ func (s PullRequestEvent) String() string { type PullRequestMergedStateChangedEventMetadata struct { _ struct{} `type:"structure"` - // The name of the branch that the pull request will be merged into. + // The name of the branch that the pull request is merged into. DestinationReference *string `locationName:"destinationReference" type:"string"` // Information about the merge state change event. @@ -848,8 +1087,8 @@ type PullRequestTarget struct { // commit where the pull request was or will be merged. DestinationCommit *string `locationName:"destinationCommit" type:"string"` - // The branch of the repository where the pull request changes will be merged - // into. Also known as the destination branch. + // The branch of the repository where the pull request changes are merged. Also + // known as the destination branch. DestinationReference *string `locationName:"destinationReference" type:"string"` // The commit ID of the most recent commit that the source branch and the destination @@ -866,7 +1105,7 @@ type PullRequestTarget struct { // The full commit ID of the tip of the source branch used to create the pull // request. If the pull request branch is updated by a push while the pull request - // is open, the commit ID will change to reflect the new tip of the branch. + // is open, the commit ID changes to reflect the new tip of the branch. SourceCommit *string `locationName:"sourceCommit" type:"string"` // The branch of the repository that contains the changes for the pull request. @@ -879,7 +1118,7 @@ func (s PullRequestTarget) String() string { return awsutil.Prettify(s) } -// Information about a file that will be added or updated as part of a commit. +// Information about a file added or updated as part of a commit. 
type PutFileEntry struct { _ struct{} `type:"structure"` @@ -1032,25 +1271,25 @@ func (s RepositoryNameIdPair) String() string { type RepositoryTrigger struct { _ struct{} `type:"structure"` - // The branches that will be included in the trigger configuration. If you specify - // an empty array, the trigger will apply to all branches. + // The branches to be included in the trigger configuration. If you specify + // an empty array, the trigger applies to all branches. // // Although no content is required in the array, you must include the array // itself. Branches []string `locationName:"branches" type:"list"` - // Any custom data associated with the trigger that will be included in the - // information sent to the target of the trigger. + // Any custom data associated with the trigger to be included in the information + // sent to the target of the trigger. CustomData *string `locationName:"customData" type:"string"` - // The ARN of the resource that is the target for a trigger. For example, the - // ARN of a topic in Amazon SNS. + // The ARN of the resource that is the target for a trigger (for example, the + // ARN of a topic in Amazon SNS). // // DestinationArn is a required field DestinationArn *string `locationName:"destinationArn" type:"string" required:"true"` - // The repository events that will cause the trigger to run actions in another - // service, such as sending a notification through Amazon SNS. + // The repository events that cause the trigger to run actions in another service, + // such as sending a notification through Amazon SNS. // // The valid value "all" cannot be used with any other values. // @@ -1094,7 +1333,7 @@ func (s *RepositoryTrigger) Validate() error { type RepositoryTriggerExecutionFailure struct { _ struct{} `type:"structure"` - // Additional message information about the trigger that did not run. + // Message information about the trigger that did not run. 
FailureMessage *string `locationName:"failureMessage" type:"string"` // The name of the trigger that did not run. @@ -1199,7 +1438,7 @@ func (s SubModule) String() string { type SymbolicLink struct { _ struct{} `type:"structure"` - // The fully-qualified path to the folder that contains the symbolic link. + // The fully qualified path to the folder that contains the symbolic link. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The blob ID that contains the information about the symbolic link. @@ -1222,8 +1461,8 @@ func (s SymbolicLink) String() string { type Target struct { _ struct{} `type:"structure"` - // The branch of the repository where the pull request changes will be merged - // into. Also known as the destination branch. + // The branch of the repository where the pull request changes are merged. Also + // known as the destination branch. DestinationReference *string `locationName:"destinationReference" type:"string"` // The name of the repository that contains the pull request. diff --git a/service/codecommit/codecommitiface/interface.go b/service/codecommit/codecommitiface/interface.go index 1c4ba71d686..95335b318d6 100644 --- a/service/codecommit/codecommitiface/interface.go +++ b/service/codecommit/codecommitiface/interface.go @@ -23,7 +23,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // CodeCommit. 
// func myFunc(svc codecommitiface.ClientAPI) bool { -// // Make svc.BatchDescribeMergeConflicts request +// // Make svc.AssociateApprovalRuleTemplateWithRepository request // } // // func main() { @@ -43,7 +43,7 @@ import ( // type mockClientClient struct { // codecommitiface.ClientPI // } -// func (m *mockClientClient) BatchDescribeMergeConflicts(input *codecommit.BatchDescribeMergeConflictsInput) (*codecommit.BatchDescribeMergeConflictsOutput, error) { +// func (m *mockClientClient) AssociateApprovalRuleTemplateWithRepository(input *codecommit.AssociateApprovalRuleTemplateWithRepositoryInput) (*codecommit.AssociateApprovalRuleTemplateWithRepositoryOutput, error) { // // mock response/functionality // } // @@ -61,34 +61,54 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. type ClientAPI interface { + AssociateApprovalRuleTemplateWithRepositoryRequest(*codecommit.AssociateApprovalRuleTemplateWithRepositoryInput) codecommit.AssociateApprovalRuleTemplateWithRepositoryRequest + + BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(*codecommit.BatchAssociateApprovalRuleTemplateWithRepositoriesInput) codecommit.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest + BatchDescribeMergeConflictsRequest(*codecommit.BatchDescribeMergeConflictsInput) codecommit.BatchDescribeMergeConflictsRequest + BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest(*codecommit.BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) codecommit.BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest + BatchGetCommitsRequest(*codecommit.BatchGetCommitsInput) codecommit.BatchGetCommitsRequest BatchGetRepositoriesRequest(*codecommit.BatchGetRepositoriesInput) codecommit.BatchGetRepositoriesRequest + CreateApprovalRuleTemplateRequest(*codecommit.CreateApprovalRuleTemplateInput) codecommit.CreateApprovalRuleTemplateRequest + CreateBranchRequest(*codecommit.CreateBranchInput) 
codecommit.CreateBranchRequest CreateCommitRequest(*codecommit.CreateCommitInput) codecommit.CreateCommitRequest CreatePullRequestRequest(*codecommit.CreatePullRequestInput) codecommit.CreatePullRequestRequest + CreatePullRequestApprovalRuleRequest(*codecommit.CreatePullRequestApprovalRuleInput) codecommit.CreatePullRequestApprovalRuleRequest + CreateRepositoryRequest(*codecommit.CreateRepositoryInput) codecommit.CreateRepositoryRequest CreateUnreferencedMergeCommitRequest(*codecommit.CreateUnreferencedMergeCommitInput) codecommit.CreateUnreferencedMergeCommitRequest + DeleteApprovalRuleTemplateRequest(*codecommit.DeleteApprovalRuleTemplateInput) codecommit.DeleteApprovalRuleTemplateRequest + DeleteBranchRequest(*codecommit.DeleteBranchInput) codecommit.DeleteBranchRequest DeleteCommentContentRequest(*codecommit.DeleteCommentContentInput) codecommit.DeleteCommentContentRequest DeleteFileRequest(*codecommit.DeleteFileInput) codecommit.DeleteFileRequest + DeletePullRequestApprovalRuleRequest(*codecommit.DeletePullRequestApprovalRuleInput) codecommit.DeletePullRequestApprovalRuleRequest + DeleteRepositoryRequest(*codecommit.DeleteRepositoryInput) codecommit.DeleteRepositoryRequest DescribeMergeConflictsRequest(*codecommit.DescribeMergeConflictsInput) codecommit.DescribeMergeConflictsRequest DescribePullRequestEventsRequest(*codecommit.DescribePullRequestEventsInput) codecommit.DescribePullRequestEventsRequest + DisassociateApprovalRuleTemplateFromRepositoryRequest(*codecommit.DisassociateApprovalRuleTemplateFromRepositoryInput) codecommit.DisassociateApprovalRuleTemplateFromRepositoryRequest + + EvaluatePullRequestApprovalRulesRequest(*codecommit.EvaluatePullRequestApprovalRulesInput) codecommit.EvaluatePullRequestApprovalRulesRequest + + GetApprovalRuleTemplateRequest(*codecommit.GetApprovalRuleTemplateInput) codecommit.GetApprovalRuleTemplateRequest + GetBlobRequest(*codecommit.GetBlobInput) codecommit.GetBlobRequest GetBranchRequest(*codecommit.GetBranchInput) 
codecommit.GetBranchRequest @@ -115,16 +135,26 @@ type ClientAPI interface { GetPullRequestRequest(*codecommit.GetPullRequestInput) codecommit.GetPullRequestRequest + GetPullRequestApprovalStatesRequest(*codecommit.GetPullRequestApprovalStatesInput) codecommit.GetPullRequestApprovalStatesRequest + + GetPullRequestOverrideStateRequest(*codecommit.GetPullRequestOverrideStateInput) codecommit.GetPullRequestOverrideStateRequest + GetRepositoryRequest(*codecommit.GetRepositoryInput) codecommit.GetRepositoryRequest GetRepositoryTriggersRequest(*codecommit.GetRepositoryTriggersInput) codecommit.GetRepositoryTriggersRequest + ListApprovalRuleTemplatesRequest(*codecommit.ListApprovalRuleTemplatesInput) codecommit.ListApprovalRuleTemplatesRequest + + ListAssociatedApprovalRuleTemplatesForRepositoryRequest(*codecommit.ListAssociatedApprovalRuleTemplatesForRepositoryInput) codecommit.ListAssociatedApprovalRuleTemplatesForRepositoryRequest + ListBranchesRequest(*codecommit.ListBranchesInput) codecommit.ListBranchesRequest ListPullRequestsRequest(*codecommit.ListPullRequestsInput) codecommit.ListPullRequestsRequest ListRepositoriesRequest(*codecommit.ListRepositoriesInput) codecommit.ListRepositoriesRequest + ListRepositoriesForApprovalRuleTemplateRequest(*codecommit.ListRepositoriesForApprovalRuleTemplateInput) codecommit.ListRepositoriesForApprovalRuleTemplateRequest + ListTagsForResourceRequest(*codecommit.ListTagsForResourceInput) codecommit.ListTagsForResourceRequest MergeBranchesByFastForwardRequest(*codecommit.MergeBranchesByFastForwardInput) codecommit.MergeBranchesByFastForwardRequest @@ -139,6 +169,8 @@ type ClientAPI interface { MergePullRequestByThreeWayRequest(*codecommit.MergePullRequestByThreeWayInput) codecommit.MergePullRequestByThreeWayRequest + OverridePullRequestApprovalRulesRequest(*codecommit.OverridePullRequestApprovalRulesInput) codecommit.OverridePullRequestApprovalRulesRequest + 
PostCommentForComparedCommitRequest(*codecommit.PostCommentForComparedCommitInput) codecommit.PostCommentForComparedCommitRequest PostCommentForPullRequestRequest(*codecommit.PostCommentForPullRequestInput) codecommit.PostCommentForPullRequestRequest @@ -155,10 +187,20 @@ type ClientAPI interface { UntagResourceRequest(*codecommit.UntagResourceInput) codecommit.UntagResourceRequest + UpdateApprovalRuleTemplateContentRequest(*codecommit.UpdateApprovalRuleTemplateContentInput) codecommit.UpdateApprovalRuleTemplateContentRequest + + UpdateApprovalRuleTemplateDescriptionRequest(*codecommit.UpdateApprovalRuleTemplateDescriptionInput) codecommit.UpdateApprovalRuleTemplateDescriptionRequest + + UpdateApprovalRuleTemplateNameRequest(*codecommit.UpdateApprovalRuleTemplateNameInput) codecommit.UpdateApprovalRuleTemplateNameRequest + UpdateCommentRequest(*codecommit.UpdateCommentInput) codecommit.UpdateCommentRequest UpdateDefaultBranchRequest(*codecommit.UpdateDefaultBranchInput) codecommit.UpdateDefaultBranchRequest + UpdatePullRequestApprovalRuleContentRequest(*codecommit.UpdatePullRequestApprovalRuleContentInput) codecommit.UpdatePullRequestApprovalRuleContentRequest + + UpdatePullRequestApprovalStateRequest(*codecommit.UpdatePullRequestApprovalStateInput) codecommit.UpdatePullRequestApprovalStateRequest + UpdatePullRequestDescriptionRequest(*codecommit.UpdatePullRequestDescriptionInput) codecommit.UpdatePullRequestDescriptionRequest UpdatePullRequestStatusRequest(*codecommit.UpdatePullRequestStatusInput) codecommit.UpdatePullRequestStatusRequest diff --git a/service/cognitoidentityprovider/api_enums.go b/service/cognitoidentityprovider/api_enums.go index 62449915abf..a831fa10a37 100644 --- a/service/cognitoidentityprovider/api_enums.go +++ b/service/cognitoidentityprovider/api_enums.go @@ -80,12 +80,13 @@ type AuthFlowType string // Enum values for AuthFlowType const ( - AuthFlowTypeUserSrpAuth AuthFlowType = "USER_SRP_AUTH" - AuthFlowTypeRefreshTokenAuth AuthFlowType = 
"REFRESH_TOKEN_AUTH" - AuthFlowTypeRefreshToken AuthFlowType = "REFRESH_TOKEN" - AuthFlowTypeCustomAuth AuthFlowType = "CUSTOM_AUTH" - AuthFlowTypeAdminNoSrpAuth AuthFlowType = "ADMIN_NO_SRP_AUTH" - AuthFlowTypeUserPasswordAuth AuthFlowType = "USER_PASSWORD_AUTH" + AuthFlowTypeUserSrpAuth AuthFlowType = "USER_SRP_AUTH" + AuthFlowTypeRefreshTokenAuth AuthFlowType = "REFRESH_TOKEN_AUTH" + AuthFlowTypeRefreshToken AuthFlowType = "REFRESH_TOKEN" + AuthFlowTypeCustomAuth AuthFlowType = "CUSTOM_AUTH" + AuthFlowTypeAdminNoSrpAuth AuthFlowType = "ADMIN_NO_SRP_AUTH" + AuthFlowTypeUserPasswordAuth AuthFlowType = "USER_PASSWORD_AUTH" + AuthFlowTypeAdminUserPasswordAuth AuthFlowType = "ADMIN_USER_PASSWORD_AUTH" ) func (enum AuthFlowType) MarshalValue() (string, error) { @@ -318,9 +319,14 @@ type ExplicitAuthFlowsType string // Enum values for ExplicitAuthFlowsType const ( - ExplicitAuthFlowsTypeAdminNoSrpAuth ExplicitAuthFlowsType = "ADMIN_NO_SRP_AUTH" - ExplicitAuthFlowsTypeCustomAuthFlowOnly ExplicitAuthFlowsType = "CUSTOM_AUTH_FLOW_ONLY" - ExplicitAuthFlowsTypeUserPasswordAuth ExplicitAuthFlowsType = "USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAdminNoSrpAuth ExplicitAuthFlowsType = "ADMIN_NO_SRP_AUTH" + ExplicitAuthFlowsTypeCustomAuthFlowOnly ExplicitAuthFlowsType = "CUSTOM_AUTH_FLOW_ONLY" + ExplicitAuthFlowsTypeUserPasswordAuth ExplicitAuthFlowsType = "USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth ExplicitAuthFlowsType = "ALLOW_ADMIN_USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowCustomAuth ExplicitAuthFlowsType = "ALLOW_CUSTOM_AUTH" + ExplicitAuthFlowsTypeAllowUserPasswordAuth ExplicitAuthFlowsType = "ALLOW_USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowUserSrpAuth ExplicitAuthFlowsType = "ALLOW_USER_SRP_AUTH" + ExplicitAuthFlowsTypeAllowRefreshTokenAuth ExplicitAuthFlowsType = "ALLOW_REFRESH_TOKEN_AUTH" ) func (enum ExplicitAuthFlowsType) MarshalValue() (string, error) { @@ -404,6 +410,23 @@ func (enum OAuthFlowType) MarshalValueBuf(b []byte) 
([]byte, error) { return append(b, enum...), nil } +type PreventUserExistenceErrorTypes string + +// Enum values for PreventUserExistenceErrorTypes +const ( + PreventUserExistenceErrorTypesLegacy PreventUserExistenceErrorTypes = "LEGACY" + PreventUserExistenceErrorTypesEnabled PreventUserExistenceErrorTypes = "ENABLED" +) + +func (enum PreventUserExistenceErrorTypes) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PreventUserExistenceErrorTypes) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type RiskDecisionType string // Enum values for RiskDecisionType diff --git a/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go b/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go index 6e1a4b1caf9..7f64b89b3de 100644 --- a/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go +++ b/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go @@ -48,6 +48,11 @@ type AdminInitiateAuthInput struct { // will invoke the user migration Lambda if the USERNAME is not found in // the user pool. // + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password authentication. + // This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this flow, + // Cognito receives the password in the request instead of using the SRP + // process to verify passwords. + // // AuthFlow is a required field AuthFlow AuthFlowType `type:"string" required:"true" enum:"true"` diff --git a/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go b/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go index a41afc92e7d..18602f022d1 100644 --- a/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go +++ b/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go @@ -27,13 +27,13 @@ type ConfirmForgotPasswordInput struct { // // You create custom workflows by assigning AWS Lambda functions to user pool // triggers. 
When you use the ConfirmForgotPassword API action, Amazon Cognito - // invokes the functions that are assigned to the post confirmation and pre - // mutation triggers. When Amazon Cognito invokes either of these functions, - // it passes a JSON payload, which the function receives as input. This payload - // contains a clientMetadata attribute, which provides the data that you assigned - // to the ClientMetadata parameter in your ConfirmForgotPassword request. In - // your function code in AWS Lambda, you can process the clientMetadata value - // to enhance your workflow for your specific needs. + // invokes the function that is assigned to the post confirmation trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your ConfirmForgotPassword request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. // // For more information, see Customizing User Pool Workflows with Lambda Triggers // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) diff --git a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go index e6d83fe2769..9787afe08ec 100644 --- a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go @@ -75,7 +75,28 @@ type CreateUserPoolClientInput struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // The explicit authentication flows. + // The authentication flows that are supported by the user pool clients. 
Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password protocol) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []ExplicitAuthFlowsType `type:"list"` // Boolean to specify whether you want to generate a secret for the user pool @@ -85,6 +106,44 @@ type CreateUserPoolClientInput struct { // A list of allowed logout URLs for the identity providers. LogoutURLs []string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException exception + // if the user does not exist in the user pool. 
+ // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors PreventUserExistenceErrorTypes `type:"string" enum:"true"` + // The read attributes. ReadAttributes []string `type:"list"` diff --git a/service/cognitoidentityprovider/api_op_InitiateAuth.go b/service/cognitoidentityprovider/api_op_InitiateAuth.go index 245b8c202f6..da1001b45d1 100644 --- a/service/cognitoidentityprovider/api_op_InitiateAuth.go +++ b/service/cognitoidentityprovider/api_op_InitiateAuth.go @@ -44,6 +44,11 @@ type InitiateAuthInput struct { // will invoke the user migration Lambda if the USERNAME is not found in // the user pool. // + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password authentication. + // This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this flow, + // Cognito receives the password in the request instead of using the SRP + // process to verify passwords. + // // ADMIN_NO_SRP_AUTH is not a valid value. 
// // AuthFlow is a required field diff --git a/service/cognitoidentityprovider/api_op_UpdateGroup.go b/service/cognitoidentityprovider/api_op_UpdateGroup.go index 22487415726..a7df89d9a68 100644 --- a/service/cognitoidentityprovider/api_op_UpdateGroup.go +++ b/service/cognitoidentityprovider/api_op_UpdateGroup.go @@ -87,6 +87,9 @@ const opUpdateGroup = "UpdateGroup" // // Calling this action requires developer credentials. // +// If you don't provide a value for an attribute, it will be set to the default +// value. +// // // Example sending a request using UpdateGroupRequest. // req := client.UpdateGroupRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/cognitoidentityprovider/api_op_UpdateResourceServer.go b/service/cognitoidentityprovider/api_op_UpdateResourceServer.go index 7f22b81913b..b1bd2989329 100644 --- a/service/cognitoidentityprovider/api_op_UpdateResourceServer.go +++ b/service/cognitoidentityprovider/api_op_UpdateResourceServer.go @@ -96,6 +96,9 @@ const opUpdateResourceServer = "UpdateResourceServer" // // Updates the name and scopes of resource server. All other fields are read-only. // +// If you don't provide a value for an attribute, it will be set to the default +// value. +// // // Example sending a request using UpdateResourceServerRequest. // req := client.UpdateResourceServerRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go b/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go index a47ae0afc7c..a4a963f0c84 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go @@ -24,13 +24,13 @@ type UpdateUserAttributesInput struct { // // You create custom workflows by assigning AWS Lambda functions to user pool // triggers. 
When you use the UpdateUserAttributes API action, Amazon Cognito - // invokes the functions that are assigned to the custom message and pre mutation - // triggers. When Amazon Cognito invokes either of these functions, it passes - // a JSON payload, which the function receives as input. This payload contains - // a clientMetadata attribute, which provides the data that you assigned to - // the ClientMetadata parameter in your UpdateUserAttributes request. In your - // function code in AWS Lambda, you can process the clientMetadata value to - // enhance your workflow for your specific needs. + // invokes the function that is assigned to the custom message trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your UpdateUserAttributes request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. // // For more information, see Customizing User Pool Workflows with Lambda Triggers // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) diff --git a/service/cognitoidentityprovider/api_op_UpdateUserPool.go b/service/cognitoidentityprovider/api_op_UpdateUserPool.go index 1c6f2b3070c..9c3cca5c36d 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserPool.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserPool.go @@ -162,9 +162,11 @@ const opUpdateUserPool = "UpdateUserPool" // UpdateUserPoolRequest returns a request value for making API operation for // Amazon Cognito Identity Provider. // -// Updates the specified user pool with the specified attributes. If you don't -// provide a value for an attribute, it will be set to the default value. 
You -// can get a list of the current user pool settings with . +// Updates the specified user pool with the specified attributes. You can get +// a list of the current user pool settings with . +// +// If you don't provide a value for an attribute, it will be set to the default +// value. // // // Example sending a request using UpdateUserPoolRequest. // req := client.UpdateUserPoolRequest(params) diff --git a/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go b/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go index 9ec6ca77b5b..6cf125aeca8 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go @@ -75,12 +75,71 @@ type UpdateUserPoolClientInput struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // Explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password protocol) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. 
+ // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []ExplicitAuthFlowsType `type:"list"` // A list of allowed logout URLs for the identity providers. LogoutURLs []string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException exception + // if the user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors PreventUserExistenceErrorTypes `type:"string" enum:"true"` + // The read-only attributes of the user pool. ReadAttributes []string `type:"list"` @@ -163,8 +222,10 @@ const opUpdateUserPoolClient = "UpdateUserPoolClient" // Amazon Cognito Identity Provider. // // Updates the specified user pool app client with the specified attributes. +// You can get a list of the current user pool app client settings with . 
+// // If you don't provide a value for an attribute, it will be set to the default -// value. You can get a list of the current user pool app client settings with . +// value. // // // Example sending a request using UpdateUserPoolClientRequest. // req := client.UpdateUserPoolClientRequest(params) diff --git a/service/cognitoidentityprovider/api_types.go b/service/cognitoidentityprovider/api_types.go index eeee49e3dd5..a947c38e3a3 100644 --- a/service/cognitoidentityprovider/api_types.go +++ b/service/cognitoidentityprovider/api_types.go @@ -654,6 +654,22 @@ func (s DomainDescriptionType) String() string { type EmailConfigurationType struct { _ struct{} `type:"structure"` + // The set of configuration rules that can be applied to emails sent using Amazon + // SES. A configuration set is applied to an email by including a reference + // to the configuration set in the headers of the email. Once applied, all of + // the rules in that configuration set are applied to the email. Configuration + // sets can be used to apply the following types of rules to emails: + // + // * Event publishing – Amazon SES can track the number of send, delivery, + // open, click, bounce, and complaint events for each email sent. Use event + // publishing to send information about these events to other AWS services + // such as SNS and CloudWatch. + // + // * IP pool management – When leasing dedicated IP addresses with Amazon + // SES, you can create groups of IP addresses, called dedicated IP pools. + // You can then associate the dedicated IP pools with configuration sets. + ConfigurationSet *string `min:"1" type:"string"` + // Specifies whether Amazon Cognito emails your users by using its built-in // email functionality or your Amazon SES email configuration. Specify one of // the following values: @@ -696,6 +712,11 @@ type EmailConfigurationType struct { // in the Amazon Cognito Developer Guide. 
EmailSendingAccount EmailSendingAccountType `type:"string" enum:"true"` + // Identifies either the sender’s email address or the sender’s name with + // their email address. For example, testuser@example.com or Test User