diff --git a/CHANGELOG.md b/CHANGELOG.md index 27d8af5b380..b27125e519b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.37.20 (2021-02-26) +=== + +### Service Client Updates +* `service/eks`: Updates service API and documentation +* `service/elasticmapreduce`: Updates service API and documentation + * Added UpdateStudio API that allows updating a few attributes of an EMR Studio. +* `service/s3`: Updates service API, documentation, and examples + * Add RequestPayer to GetObjectTagging and PutObjectTagging. +* `service/sso-admin`: Updates service API + Release v1.37.19 (2021-02-25) === diff --git a/aws/version.go b/aws/version.go index 9849554f46a..0c403a140c3 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.19" +const SDKVersion = "1.37.20" diff --git a/models/apis/eks/2017-11-01/api-2.json b/models/apis/eks/2017-11-01/api-2.json index ae58b148678..ce087af01e2 100644 --- a/models/apis/eks/2017-11-01/api-2.json +++ b/models/apis/eks/2017-11-01/api-2.json @@ -13,6 +13,23 @@ "uid":"eks-2017-11-01" }, "operations":{ + "AssociateEncryptionConfig":{ + "name":"AssociateEncryptionConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/encryption-config/associate" + }, + "input":{"shape":"AssociateEncryptionConfigRequest"}, + "output":{"shape":"AssociateEncryptionConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ] + }, "AssociateIdentityProviderConfig":{ "name":"AssociateIdentityProviderConfig", "http":{ @@ -594,6 +611,31 @@ "type":"list", "member":{"shape":"AddonInfo"} }, + "AssociateEncryptionConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "encryptionConfig" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "encryptionConfig":{"shape":"EncryptionConfigList"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + } + } + }, + "AssociateEncryptionConfigResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, "AssociateIdentityProviderConfigRequest":{ "type":"structure", "required":[ @@ -2055,6 +2097,7 @@ "ReleaseVersion", "PublicAccessCidrs", "IdentityProviderConfig", + "EncryptionConfig", "AddonVersion", "ServiceAccountRoleArn", "ResolveConflicts" @@ -2082,6 +2125,7 @@ "ConfigUpdate", "AssociateIdentityProviderConfig", "DisassociateIdentityProviderConfig", + "AssociateEncryptionConfig", "AddonUpdate" ] }, diff --git a/models/apis/eks/2017-11-01/docs-2.json b/models/apis/eks/2017-11-01/docs-2.json index 1b86c8da4f5..1673d584358 100644 --- a/models/apis/eks/2017-11-01/docs-2.json +++ b/models/apis/eks/2017-11-01/docs-2.json @@ -2,9 +2,10 @@ "version": "2.0", "service": "

Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.

Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.

", "operations": { + "AssociateEncryptionConfig": "

Associates an encryption configuration with an existing cluster.

You can use this API to enable encryption on existing clusters that do not already have encryption enabled. This allows you to implement a defense-in-depth security strategy without migrating applications to new EKS clusters.

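A minimal, hedged Go sketch of calling the new AssociateEncryptionConfig API through this SDK. The cluster name and KMS key ARN below are hypothetical placeholders; the field names follow the request shape added in this model.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := eks.New(sess)

	// Associate a KMS-backed encryption configuration with an existing cluster.
	// "secrets" is the resource type used for Kubernetes secrets envelope encryption.
	out, err := svc.AssociateEncryptionConfig(&eks.AssociateEncryptionConfigInput{
		ClusterName: aws.String("my-cluster"), // hypothetical cluster name
		EncryptionConfig: []*eks.EncryptionConfig{{
			Provider:  &eks.Provider{KeyArn: aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE")}, // hypothetical key ARN
			Resources: []*string{aws.String("secrets")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The call returns an asynchronous Update that can be polled with DescribeUpdate.
	fmt.Println(aws.StringValue(out.Update.Id), aws.StringValue(out.Update.Status))
}
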
", "AssociateIdentityProviderConfig": "

Associate an identity provider configuration to a cluster.

If you want to authenticate identities using an identity provider, you can create an identity provider configuration and associate it with your cluster. After configuring authentication to your cluster, you can create Kubernetes roles and clusterroles to assign permissions to the roles, and then bind the roles to the identities using Kubernetes rolebindings and clusterrolebindings. For more information, see Using RBAC Authorization in the Kubernetes documentation.

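A hedged sketch of the association step described above, using this SDK with an OIDC identity provider. The cluster name, config name, issuer URL, and client ID are placeholders; the subsequent RBAC bindings are created with kubectl, not this API.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := eks.New(sess)

	// Associate an OIDC identity provider configuration with an existing cluster.
	out, err := svc.AssociateIdentityProviderConfig(&eks.AssociateIdentityProviderConfigInput{
		ClusterName: aws.String("my-cluster"), // hypothetical
		Oidc: &eks.OidcIdentityProviderConfigRequest{
			IdentityProviderConfigName: aws.String("example-oidc"),             // hypothetical
			IssuerUrl:                  aws.String("https://oidc.example.com"), // hypothetical
			ClientId:                   aws.String("example-client-id"),        // hypothetical
			GroupsClaim:                aws.String("groups"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Update.Id))
}
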
", "CreateAddon": "

Creates an Amazon EKS add-on.

Amazon EKS add-ons help to automate the provisioning and lifecycle management of common operational software for Amazon EKS clusters. Amazon EKS add-ons can only be used with Amazon EKS clusters running version 1.18 with platform version eks.3 or later because add-ons rely on the Server-side Apply Kubernetes feature, which is only available in Kubernetes 1.18 and later.

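A short, hedged sketch of creating an add-on with this SDK. The cluster name is a placeholder and "vpc-cni" is used only as an example add-on name; DescribeAddonVersions lists what is actually available.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := eks.New(sess)

	out, err := svc.CreateAddon(&eks.CreateAddonInput{
		ClusterName:      aws.String("my-cluster"), // hypothetical; must run Kubernetes 1.18+ with platform version eks.3 or later
		AddonName:        aws.String("vpc-cni"),    // example add-on name
		ResolveConflicts: aws.String(eks.ResolveConflictsOverwrite),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Addon.Status))
}
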
", - "CreateCluster": "

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique and runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.

You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.

Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

", + "CreateCluster": "

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique and runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.

Cluster creation typically takes several minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

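A hedged sketch of a CreateCluster call with this SDK, showing the VPC configuration, endpoint access flags, and control plane logging options. The cluster name, role ARN, and subnet IDs are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := eks.New(sess)

	out, err := svc.CreateCluster(&eks.CreateClusterInput{
		Name:    aws.String("my-cluster"),                                      // hypothetical
		RoleArn: aws.String("arn:aws:iam::111122223333:role/eks-cluster-role"), // hypothetical
		ResourcesVpcConfig: &eks.VpcConfigRequest{
			SubnetIds:             []*string{aws.String("subnet-aaaa"), aws.String("subnet-bbbb")}, // hypothetical
			EndpointPublicAccess:  aws.Bool(true),  // public access is enabled by default
			EndpointPrivateAccess: aws.Bool(false), // private access is disabled by default
		},
		// Control plane logs are not exported to CloudWatch Logs unless enabled here.
		Logging: &eks.Logging{ClusterLogging: []*eks.LogSetup{{
			Enabled: aws.Bool(true),
			Types:   []*string{aws.String(eks.LogTypeApi), aws.String(eks.LogTypeAudit)},
		}}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Cluster.Status)) // creation typically takes several minutes
}
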
", "CreateFargateProfile": "

Creates an AWS Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate.

The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate.

When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role-Based Access Control (RBAC) for authorization, so that the kubelet running on the Fargate infrastructure can register with your Amazon EKS cluster and appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide.

Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating.

If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster.

For more information, see AWS Fargate Profile in the Amazon EKS User Guide.

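A hedged sketch of creating a Fargate profile with this SDK, following the selector model described above. The cluster name, profile name, pod execution role ARN, subnet, and labels are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := eks.New(sess)

	out, err := svc.CreateFargateProfile(&eks.CreateFargateProfileInput{
		ClusterName:         aws.String("my-cluster"),                                      // hypothetical
		FargateProfileName:  aws.String("default-profile"),                                 // hypothetical
		PodExecutionRoleArn: aws.String("arn:aws:iam::111122223333:role/eks-fargate-pods"), // hypothetical
		Subnets:             []*string{aws.String("subnet-aaaa")},                          // hypothetical private subnet
		// Up to five selectors per profile; a namespace is required, labels are optional.
		Selectors: []*eks.FargateProfileSelector{{
			Namespace: aws.String("default"),
			Labels:    map[string]*string{"run-on": aws.String("fargate")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.FargateProfile.Status))
}
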
", "CreateNodegroup": "

Creates a managed node group for an Amazon EKS cluster. You can only create a node group whose Kubernetes version is equal to the current Kubernetes version of the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.

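A hedged sketch of creating a managed node group with this SDK. The cluster name, node role ARN, and subnets are placeholders; the node group inherits the cluster's Kubernetes version unless a launch template with a custom AMI is used.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := eks.New(sess)

	out, err := svc.CreateNodegroup(&eks.CreateNodegroupInput{
		ClusterName:   aws.String("my-cluster"),                                        // hypothetical
		NodegroupName: aws.String("workers"),                                           // hypothetical
		NodeRole:      aws.String("arn:aws:iam::111122223333:role/eks-node-role"),      // hypothetical
		Subnets:       []*string{aws.String("subnet-aaaa"), aws.String("subnet-bbbb")}, // hypothetical
		InstanceTypes: []*string{aws.String("m5.large")},
		ScalingConfig: &eks.NodegroupScalingConfig{
			MinSize:     aws.Int64(1),
			DesiredSize: aws.Int64(2),
			MaxSize:     aws.Int64(4),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Nodegroup.Status))
}
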
", "DeleteAddon": "

Delete an Amazon EKS add-on.

When you remove the add-on, it will also be deleted from the cluster. You can always manually start an add-on on the cluster using the Kubernetes API.

", @@ -104,6 +105,16 @@ "DescribeAddonVersionsResponse$addons": "

The list of available versions with Kubernetes version compatibility.

" } }, + "AssociateEncryptionConfigRequest": { + "base": null, + "refs": { + } + }, + "AssociateEncryptionConfigResponse": { + "base": null, + "refs": { + } + }, "AssociateIdentityProviderConfigRequest": { "base": null, "refs": { @@ -394,6 +405,7 @@ "EncryptionConfigList": { "base": null, "refs": { + "AssociateEncryptionConfigRequest$encryptionConfig": "

The configuration you are using for encryption.

", "Cluster$encryptionConfig": "

The encryption configuration for the cluster.

", "CreateClusterRequest$encryptionConfig": "

The encryption configuration for the cluster.

" } @@ -781,6 +793,8 @@ "AddonInfo$type": "

The type of the add-on.

", "AddonIssue$message": "

A message that provides details about the issue and what might cause it.

", "AddonVersionInfo$addonVersion": "

The version of the add-on.

", + "AssociateEncryptionConfigRequest$clusterName": "

The name of the cluster that you are associating the encryption configuration with.

", + "AssociateEncryptionConfigRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "AssociateIdentityProviderConfigRequest$clusterName": "

The name of the cluster to associate the configuration to.

", "AssociateIdentityProviderConfigRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "AutoScalingGroup$name": "

The name of the Auto Scaling group associated with an Amazon EKS managed node group.

", @@ -1061,6 +1075,7 @@ "Update": { "base": "

An object representing an asynchronous update.

", "refs": { + "AssociateEncryptionConfigResponse$update": null, "AssociateIdentityProviderConfigResponse$update": null, "DescribeUpdateResponse$update": "

The full description of the specified update.

", "DisassociateIdentityProviderConfigResponse$update": null, diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index 38b16f27f5f..e6b23c9f5d0 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -562,6 +562,18 @@ {"shape":"InternalServerError"} ] }, + "UpdateStudio":{ + "name":"UpdateStudio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStudioInput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, "UpdateStudioSessionMapping":{ "name":"UpdateStudioSessionMapping", "http":{ @@ -1035,7 +1047,8 @@ "ServiceRole", "UserRole", "WorkspaceSecurityGroupId", - "EngineSecurityGroupId" + "EngineSecurityGroupId", + "DefaultS3Location" ], "members":{ "Name":{"shape":"XmlStringMaxLen256"}, @@ -2836,6 +2849,17 @@ "COUNT_PER_SECOND" ] }, + "UpdateStudioInput":{ + "type":"structure", + "required":["StudioId"], + "members":{ + "StudioId":{"shape":"XmlStringMaxLen256"}, + "Name":{"shape":"XmlStringMaxLen256"}, + "Description":{"shape":"XmlStringMaxLen256"}, + "SubnetIds":{"shape":"SubnetIdList"}, + "DefaultS3Location":{"shape":"XmlString"} + } + }, "UpdateStudioSessionMappingInput":{ "type":"structure", "required":[ diff --git a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index dd7c6d13f2f..09da8c20149 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -8,20 +8,20 @@ "AddTags": "

Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

", "CancelSteps": "

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING state.

", "CreateSecurityConfiguration": "

Creates a security configuration, which is stored in the service and can be specified when a cluster is created.

", - "CreateStudio": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Creates a new Amazon EMR Studio.

", - "CreateStudioSessionMapping": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group.

", + "CreateStudio": "

Creates a new Amazon EMR Studio.

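A hedged sketch of creating a Studio with this SDK; note that this model update makes DefaultS3Location a required member of CreateStudioInput. All names, IDs, ARNs, and the S3 location below are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := emr.New(sess)

	out, err := svc.CreateStudio(&emr.CreateStudioInput{
		Name:                     aws.String("example-studio"),                                     // hypothetical
		AuthMode:                 aws.String(emr.AuthModeSso),                                      // only SSO is currently supported
		VpcId:                    aws.String("vpc-aaaa"),                                           // hypothetical
		SubnetIds:                []*string{aws.String("subnet-aaaa")},                             // hypothetical
		ServiceRole:              aws.String("arn:aws:iam::111122223333:role/emr-studio-service"),  // hypothetical
		UserRole:                 aws.String("arn:aws:iam::111122223333:role/emr-studio-user"),     // hypothetical
		WorkspaceSecurityGroupId: aws.String("sg-workspace"),                                       // hypothetical
		EngineSecurityGroupId:    aws.String("sg-engine"),                                          // hypothetical
		DefaultS3Location:        aws.String("s3://example-bucket/studio/"),                        // now required by this model
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.StudioId), aws.StringValue(out.Url))
}
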
", + "CreateStudioSessionMapping": "

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group.

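A hedged sketch of mapping an AWS SSO user to a Studio with a session policy, using this SDK. The Studio ID, identity name, and policy ARN are placeholders; IdentityId could be supplied instead of IdentityName.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := emr.New(sess)

	_, err := svc.CreateStudioSessionMapping(&emr.CreateStudioSessionMappingInput{
		StudioId:         aws.String("es-EXAMPLE12345"),                                // hypothetical
		IdentityType:     aws.String(emr.IdentityTypeUser),
		IdentityName:     aws.String("analyst@example.com"),                            // hypothetical SSO user
		SessionPolicyArn: aws.String("arn:aws:iam::111122223333:policy/studio-basic"),  // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session mapping created")
}
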
", "DeleteSecurityConfiguration": "

Deletes a security configuration.

", - "DeleteStudio": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Removes an Amazon EMR Studio from the Studio metadata store.

", - "DeleteStudioSessionMapping": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Removes a user or group from an Amazon EMR Studio.

", + "DeleteStudio": "

Removes an Amazon EMR Studio from the Studio metadata store.

", + "DeleteStudioSessionMapping": "

Removes a user or group from an Amazon EMR Studio.

", "DescribeCluster": "

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on.

", "DescribeJobFlows": "

This API is no longer supported and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.

DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

Regardless of supplied parameters, only job flows created within the last two months are returned.

If no parameters are supplied, then job flows matching either of the following criteria are returned:

Amazon EMR can return a maximum of 512 job flow descriptions.

", "DescribeNotebookExecution": "

Provides details of a notebook execution.

", "DescribeSecurityConfiguration": "

Provides the details of a security configuration by returning the configuration JSON.

", "DescribeStep": "

Provides more detail about the cluster step.

", - "DescribeStudio": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

", + "DescribeStudio": "

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

", "GetBlockPublicAccessConfiguration": "

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

", "GetManagedScalingPolicy": "

Fetches the attached managed scaling policy for an Amazon EMR cluster.

", - "GetStudioSessionMapping": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

", + "GetStudioSessionMapping": "

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

", "ListBootstrapActions": "

Provides information about the bootstrap actions associated with a cluster.

", "ListClusters": "

Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

", "ListInstanceFleets": "

Lists all available details about the instance fleets in a cluster.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", @@ -29,9 +29,9 @@ "ListInstances": "

Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.

", "ListNotebookExecutions": "

Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution calls.

", "ListSecurityConfigurations": "

Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.

", - "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request of filter by StepStates. You can specify a maximum of ten stepIDs.

", - "ListStudioSessionMappings": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns a list of all user or group session mappings for the EMR Studio specified by StudioId.

", - "ListStudios": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

", + "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request, or filter by StepStates. You can specify a maximum of 10 stepIDs.

", + "ListStudioSessionMappings": "

Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId.

", + "ListStudios": "

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

", "ModifyCluster": "

Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID.

", "ModifyInstanceFleet": "

Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "ModifyInstanceGroups": "

ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.

", @@ -47,7 +47,8 @@ "StartNotebookExecution": "

Starts a notebook execution.

", "StopNotebookExecution": "

Stops a notebook execution.

", "TerminateJobFlows": "

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

", - "UpdateStudioSessionMapping": "

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Updates the session policy attached to the user or group for the specified Amazon EMR Studio.

" + "UpdateStudio": "

Updates an Amazon EMR Studio configuration, including attributes such as name, description, and subnets.

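A hedged sketch of the new UpdateStudio call with this SDK; per the model above, StudioId is required and only Name, Description, SubnetIds, and DefaultS3Location can be changed. Values below are placeholders, and the subnet list must include all subnets already associated with the Studio.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := emr.New(sess)

	_, err := svc.UpdateStudio(&emr.UpdateStudioInput{
		StudioId:          aws.String("es-EXAMPLE12345"),             // hypothetical
		Name:              aws.String("example-studio-renamed"),      // hypothetical
		Description:       aws.String("Updated Studio description"),
		SubnetIds:         []*string{aws.String("subnet-aaaa"), aws.String("subnet-bbbb")}, // existing plus new subnets
		DefaultS3Location: aws.String("s3://example-bucket/studio/"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("studio updated")
}
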
", + "UpdateStudioSessionMapping": "

Updates the session policy attached to the user or group for the specified Amazon EMR Studio.

" }, "shapes": { "ActionOnFailure": { @@ -125,15 +126,15 @@ "BlockPublicAccessConfigurationMetadata$CreatedByArn": "

The Amazon Resource Name that created or last modified the configuration.

", "Cluster$ClusterArn": "

The Amazon Resource Name of the cluster.

", "ClusterSummary$ClusterArn": "

The Amazon Resource Name of the cluster.

", - "PutAutoScalingPolicyOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

", - "RunJobFlowOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

" + "PutAutoScalingPolicyOutput$ClusterArn": "

The Amazon Resource Name (ARN) of the cluster.

", + "RunJobFlowOutput$ClusterArn": "

The Amazon Resource Name (ARN) of the cluster.

" } }, "AuthMode": { "base": null, "refs": { "CreateStudioInput$AuthMode": "

Specifies whether the Studio authenticates users using single sign-on (SSO) or IAM. Amazon EMR Studio currently only supports SSO authentication.

", - "Studio$AuthMode": "

Specifies whether the Studio authenticates users using single sign-on (SSO) or IAM.

" + "Studio$AuthMode": "

Specifies whether the Amazon EMR Studio authenticates users using single sign-on (SSO) or IAM.

" } }, "AutoScalingPolicy": { @@ -695,12 +696,12 @@ "IdentityType": { "base": null, "refs": { - "CreateStudioSessionMappingInput$IdentityType": "

Specifies whether the identity to map to the Studio is a user or a group.

", - "DeleteStudioSessionMappingInput$IdentityType": "

Specifies whether the identity to delete from the Studio is a user or a group.

", + "CreateStudioSessionMappingInput$IdentityType": "

Specifies whether the identity to map to the Amazon EMR Studio is a user or a group.

", + "DeleteStudioSessionMappingInput$IdentityType": "

Specifies whether the identity to delete from the Amazon EMR Studio is a user or a group.

", "GetStudioSessionMappingInput$IdentityType": "

Specifies whether the identity to fetch is a user or a group.

", "ListStudioSessionMappingsInput$IdentityType": "

Specifies whether to return session mappings for users or groups. If not specified, the results include session mapping details for both users and groups.

", - "SessionMappingDetail$IdentityType": "

Specifies whether the identity mapped to the Studio is a user or a group.

", - "SessionMappingSummary$IdentityType": "

Specifies whether the identity mapped to the Studio is a user or a group.

", + "SessionMappingDetail$IdentityType": "

Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.

", + "SessionMappingSummary$IdentityType": "

Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.

", "UpdateStudioSessionMappingInput$IdentityType": "

Specifies whether the identity to update is a user or a group.

" } }, @@ -798,7 +799,7 @@ "base": null, "refs": { "InstanceFleet$InstanceFleetType": "

The node type that the instance fleet hosts. Valid values are MASTER, CORE, or TASK.

", - "InstanceFleetConfig$InstanceFleetType": "

The node type that the instance fleet hosts. Valid values are MASTER,CORE,and TASK.

", + "InstanceFleetConfig$InstanceFleetType": "

The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK.

", "ListInstancesInput$InstanceFleetType": "

The node type of the instance fleet. For example MASTER, CORE, or TASK.

" } }, @@ -1059,7 +1060,7 @@ } }, "InternalServerException": { - "base": "

This exception occurs when there is an internal failure in the EMR service.

", + "base": "

This exception occurs when there is an internal failure in the Amazon EMR service.

", "refs": { } }, @@ -1127,7 +1128,7 @@ "KeyValueList": { "base": null, "refs": { - "HadoopJarStepConfig$Properties": "

A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

" + "HadoopJarStepConfig$Properties": "

A list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.

" } }, "ListBootstrapActionsInput": { @@ -1315,7 +1316,7 @@ "NewSupportedProductsList": { "base": null, "refs": { - "RunJobFlowInput$NewSupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" + "RunJobFlowInput$NewSupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" } }, "NonNegativeDouble": { @@ -1580,7 +1581,7 @@ "SessionMappingSummaryList": { "base": null, "refs": { - "ListStudioSessionMappingsOutput$SessionMappings": "

A list of session mapping summary objects. Each object includes session mapping details such as creation time, identity type (user or group), and Studio ID.

" + "ListStudioSessionMappingsOutput$SessionMappings": "

A list of session mapping summary objects. Each object includes session mapping details such as creation time, identity type (user or group), and Amazon EMR Studio ID.

" } }, "SetTerminationProtectionInput": { @@ -1867,12 +1868,13 @@ "SubnetIdList": { "base": null, "refs": { - "CreateStudioInput$SubnetIds": "

A list of subnet IDs to associate with the Studio. The subnets must belong to the VPC specified by VpcId. Studio users can create a Workspace in any of the specified subnets.

", - "Studio$SubnetIds": "

The list of IDs of the subnets associated with the Amazon EMR Studio.

" + "CreateStudioInput$SubnetIds": "

A list of subnet IDs to associate with the Amazon EMR Studio. A Studio can have a maximum of 5 subnets. The subnets must belong to the VPC specified by VpcId. Studio users can create a Workspace in any of the specified subnets.

", + "Studio$SubnetIds": "

The list of IDs of the subnets associated with the Amazon EMR Studio.

", + "UpdateStudioInput$SubnetIds": "

A list of subnet IDs to associate with the Amazon EMR Studio. The list can include new subnet IDs, but must also include all of the subnet IDs previously associated with the Studio. The list order does not matter. A Studio can have a maximum of 5 subnets. The subnets must belong to the same VPC as the Studio.

" } }, "SupportedProductConfig": { - "base": "

The list of supported product configurations which allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

", + "base": "

The list of supported product configurations that allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

", "refs": { "NewSupportedProductsList$member": null } @@ -1895,7 +1897,7 @@ "refs": { "AddTagsInput$Tags": "

A list of tags to associate with a cluster and propagate to EC2 instances. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

", "Cluster$Tags": "

A list of tags associated with a cluster.

", - "CreateStudioInput$Tags": "

A list of tags to associate with the Studio. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

", + "CreateStudioInput$Tags": "

A list of tags to associate with the Amazon EMR Studio. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

", "NotebookExecution$Tags": "

A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.

", "RunJobFlowInput$Tags": "

A list of tags to associate with a cluster and propagate to Amazon EC2 instances.

", "StartNotebookExecutionInput$Tags": "

A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.

", @@ -1913,6 +1915,11 @@ "CloudWatchAlarmDefinition$Unit": "

The unit of measure associated with the CloudWatch metric being watched. The value specified for Unit must correspond to the units specified in the CloudWatch metric.

" } }, + "UpdateStudioInput": { + "base": null, + "refs": { + } + }, "UpdateStudioSessionMappingInput": { "base": null, "refs": { @@ -1929,7 +1936,7 @@ "base": null, "refs": { "InstanceFleet$TargetOnDemandCapacity": "

The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When an On-Demand Instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedOnDemandCapacity to determine the On-Demand capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

", - "InstanceFleet$TargetSpotCapacity": "

The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedSpotCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

", + "InstanceFleet$TargetSpotCapacity": "

The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedSpotCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

", "InstanceFleet$ProvisionedOnDemandCapacity": "

The number of On-Demand units that have been provisioned for the instance fleet to fulfill TargetOnDemandCapacity. This provisioned capacity might be less than or greater than TargetOnDemandCapacity.

", "InstanceFleet$ProvisionedSpotCapacity": "

The number of Spot units that have been provisioned for this instance fleet to fulfill TargetSpotCapacity. This provisioned capacity might be less than or greater than TargetSpotCapacity.

", "InstanceFleetConfig$TargetOnDemandCapacity": "

The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When an On-Demand Instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.

If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

", @@ -1939,7 +1946,7 @@ "InstanceTypeConfig$WeightedCapacity": "

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified.

", "InstanceTypeSpecification$WeightedCapacity": "

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.

", "SpotProvisioningSpecification$TimeoutDurationMinutes": "

The spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.

", - "SpotProvisioningSpecification$BlockDurationMinutes": "

The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

" + "SpotProvisioningSpecification$BlockDurationMinutes": "

The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

" } }, "XmlString": { @@ -1950,8 +1957,8 @@ "CreateSecurityConfigurationInput$Name": "

The name of the security configuration.

", "CreateSecurityConfigurationOutput$Name": "

The name of the security configuration.

", "CreateStudioInput$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other AWS services.

", - "CreateStudioInput$UserRole": "

The IAM user role that will be assumed by users and groups logged in to a Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.

", - "CreateStudioInput$DefaultS3Location": "

The default Amazon S3 location to back up EMR Studio Workspaces and notebook files. A Studio user can select an alternative Amazon S3 location when creating a Workspace.

", + "CreateStudioInput$UserRole": "

The IAM user role that will be assumed by users and groups logged in to an Amazon EMR Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.

", + "CreateStudioInput$DefaultS3Location": "

The default Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files. A Studio user can select an alternative Amazon S3 location when creating a Workspace.

", "CreateStudioOutput$Url": "

The unique Studio access URL.

", "DeleteSecurityConfigurationInput$Name": "

The name of the security configuration.

", "DescribeSecurityConfigurationInput$Name": "

The name of the security configuration.

", @@ -1990,6 +1997,7 @@ "Studio$UserRole": "

The name of the IAM role assumed by users logged in to the Amazon EMR Studio.

", "Studio$Url": "

The unique access URL of the Amazon EMR Studio.

", "Studio$DefaultS3Location": "

The default Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.

", + "UpdateStudioInput$DefaultS3Location": "

A default Amazon S3 location to back up Workspaces and notebook files for the Amazon EMR Studio. A Studio user can select an alternative Amazon S3 location when creating a Workspace.

", "XmlStringList$member": null } }, @@ -2018,33 +2026,33 @@ "CancelStepsInput$ClusterId": "

The ClusterID for the specified steps that will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.

", "Cluster$CustomAmiId": "

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.

", "CreateStudioInput$Name": "

A descriptive name for the Amazon EMR Studio.

", - "CreateStudioInput$Description": "

A detailed description of the Studio.

", + "CreateStudioInput$Description": "

A detailed description of the Amazon EMR Studio.

", "CreateStudioInput$VpcId": "

The ID of the Amazon Virtual Private Cloud (Amazon VPC) to associate with the Studio.

", "CreateStudioInput$WorkspaceSecurityGroupId": "

The ID of the Amazon EMR Studio Workspace security group. The Workspace security group allows outbound network traffic to resources in the Engine security group, and it must be in the same VPC specified by VpcId.

", "CreateStudioInput$EngineSecurityGroupId": "

The ID of the Amazon EMR Studio Engine security group. The Engine security group allows inbound network traffic from the Workspace security group, and it must be in the same VPC specified by VpcId.

", "CreateStudioOutput$StudioId": "

The ID of the Amazon EMR Studio.

", "CreateStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio to which the user or group will be mapped.

", "CreateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "CreateStudioSessionMappingInput$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "CreateStudioSessionMappingInput$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "CreateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. Session policies refine Studio user permissions without the need to use multiple IAM user roles.

", "DeleteStudioInput$StudioId": "

The ID of the Amazon EMR Studio.

", "DeleteStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", "DeleteStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "DeleteStudioSessionMappingInput$IdentityName": "

The name of the user name or group to remove from the Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "DeleteStudioSessionMappingInput$IdentityName": "

The name of the user or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "DescribeNotebookExecutionInput$NotebookExecutionId": "

The unique identifier of the notebook execution.

", "DescribeStudioInput$StudioId": "

The Amazon EMR Studio ID.

", "ExecutionEngineConfig$Id": "

The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.

", "ExecutionEngineConfig$MasterInstanceSecurityGroupId": "

An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.

", "GetStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", "GetStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "GetStudioSessionMappingInput$IdentityName": "

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "GetStudioSessionMappingInput$IdentityName": "

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "InstanceFleet$Name": "

A friendly name for the instance fleet.

", "InstanceFleetConfig$Name": "

The friendly name of the instance fleet.

", "InstanceGroupConfig$Name": "

Friendly name given to the instance group.

", - "InstanceGroupConfig$BidPrice": "

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", + "InstanceGroupConfig$BidPrice": "

The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceGroupDetail$InstanceGroupId": "

Unique identifier for the instance group.

", "InstanceGroupDetail$Name": "

Friendly name for the instance group.

", - "InstanceGroupDetail$BidPrice": "

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", + "InstanceGroupDetail$BidPrice": "

The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceGroupIdsList$member": null, "InstanceGroupModifyConfig$InstanceGroupId": "

Unique ID of the instance group to modify.

", "InstanceTypeConfig$BidPrice": "

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", @@ -2080,15 +2088,15 @@ "RunJobFlowInput$AmiVersion": "

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

", "RunJobFlowInput$ReleaseLabel": "

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

", "RunJobFlowInput$CustomAmiId": "

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the ReleaseLabel specified. For Amazon EMR versions 2.x and 3.x, use AmiVersion instead.

For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI.

", - "RunJobFlowOutput$JobFlowId": "

An unique identifier for the job flow.

", + "RunJobFlowOutput$JobFlowId": "

A unique identifier for the job flow.

", "SecurityGroupsList$member": null, "SessionMappingDetail$StudioId": "

The ID of the Amazon EMR Studio.

", "SessionMappingDetail$IdentityId": "

The globally unique identifier (GUID) of the user or group.

", - "SessionMappingDetail$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

", + "SessionMappingDetail$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

", "SessionMappingDetail$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

", "SessionMappingSummary$StudioId": "

The ID of the Amazon EMR Studio.

", "SessionMappingSummary$IdentityId": "

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store.

", - "SessionMappingSummary$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

", + "SessionMappingSummary$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

", "SessionMappingSummary$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

", "StartNotebookExecutionInput$EditorId": "

The unique identifier of the EMR Notebook to use for notebook execution.

", "StartNotebookExecutionInput$NotebookExecutionName": "

An optional name for the notebook execution.

", @@ -2097,23 +2105,26 @@ "StepConfig$Name": "

The name of the step.

", "StepIdsList$member": null, "StopNotebookExecutionInput$NotebookExecutionId": "

The unique identifier of the notebook execution.

", - "Studio$StudioId": "

The ID of the EMR Studio.

", - "Studio$StudioArn": "

The Amazon Resource Name (ARN) of the EMR Studio.

", - "Studio$Name": "

The name of the EMR Studio.

", - "Studio$Description": "

The detailed description of the EMR Studio.

", - "Studio$VpcId": "

The ID of the VPC associated with the EMR Studio.

", + "Studio$StudioId": "

The ID of the Amazon EMR Studio.

", + "Studio$StudioArn": "

The Amazon Resource Name (ARN) of the Amazon EMR Studio.

", + "Studio$Name": "

The name of the Amazon EMR Studio.

", + "Studio$Description": "

The detailed description of the Amazon EMR Studio.

", + "Studio$VpcId": "

The ID of the VPC associated with the Amazon EMR Studio.

", "Studio$WorkspaceSecurityGroupId": "

The ID of the Workspace security group associated with the Amazon EMR Studio. The Workspace security group allows outbound network traffic to resources in the Engine security group and to the internet.

", "Studio$EngineSecurityGroupId": "

The ID of the Engine security group associated with the Amazon EMR Studio. The Engine security group allows inbound network traffic from resources in the Workspace security group.

", "StudioSummary$StudioId": "

The ID of the Amazon EMR Studio.

", "StudioSummary$Name": "

The name of the Amazon EMR Studio.

", "StudioSummary$VpcId": "

The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR Studio.

", - "StudioSummary$Description": "

The detailed description of the EMR Studio.

", + "StudioSummary$Description": "

The detailed description of the Amazon EMR Studio.

", "StudioSummary$Url": "

The unique access URL of the Amazon EMR Studio.

", "SupportedProductConfig$Name": "

The name of the product configuration.

", "SupportedProductsList$member": null, - "UpdateStudioSessionMappingInput$StudioId": "

The ID of the EMR Studio.

", + "UpdateStudioInput$StudioId": "

The ID of the Amazon EMR Studio to update.

", + "UpdateStudioInput$Name": "

A descriptive name for the Amazon EMR Studio.

", + "UpdateStudioInput$Description": "

A detailed description to assign to the Amazon EMR Studio.

", + "UpdateStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", "UpdateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "UpdateStudioSessionMappingInput$IdentityName": "

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "UpdateStudioSessionMappingInput$IdentityName": "

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "UpdateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy to associate with the specified user or group.

", "XmlStringMaxLen256List$member": null } diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index a1cfa8b45aa..4592af96b9c 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -3662,6 +3662,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -6478,6 +6483,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } }, "payload":"Tagging" diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index 6c20973aab9..0be3035e58d 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -28,7 +28,7 @@ "GetBucketAcl": "

This implementation of the GET operation uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

Related Resources

", "GetBucketAnalyticsConfiguration": "

This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.

Related Resources

", "GetBucketCors": "

Returns the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

For more information about cors, see Enabling Cross-Origin Resource Sharing.

The following operations are related to GetBucketCors:

", - "GetBucketEncryption": "

Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to GetBucketEncryption:

", + "GetBucketEncryption": "

Returns the default encryption configuration for an Amazon S3 bucket. If the bucket does not have a default encryption configuration, GetBucketEncryption returns ServerSideEncryptionConfigurationNotFoundError.

For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to GetBucketEncryption:

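As a rough illustration of the behavior described above, a hedged Go sketch that treats the ServerSideEncryptionConfigurationNotFoundError code as "no default encryption configured"; the region and bucket name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		// A bucket with no default encryption configuration returns this error code.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
			fmt.Println("no default encryption configured")
			return
		}
		log.Fatal(err)
	}
	// Print the default algorithm for each rule in the configuration.
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		fmt.Println(aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm))
	}
}
```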
", "GetBucketIntelligentTieringConfiguration": "

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to GetBucketIntelligentTieringConfiguration include:

", "GetBucketInventoryConfiguration": "

Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

The following operations are related to GetBucketInventoryConfiguration:

", "GetBucketLifecycle": "

For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycle has the following special error:

The following operations are related to GetBucketLifecycle:

", @@ -51,11 +51,11 @@ "GetObjectLegalHold": "

Gets an object's current Legal Hold status. For more information, see Locking Objects.

This action is not supported by Amazon S3 on Outposts.

", "GetObjectLockConfiguration": "

Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.

", "GetObjectRetention": "

Retrieves an object's retention settings. For more information, see Locking Objects.

This action is not supported by Amazon S3 on Outposts.

", - "GetObjectTagging": "

Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET operation returns information about current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

By default, the bucket owner has this permission and can grant this permission to others.

For information about the Amazon S3 object tagging feature, see Object Tagging.

The following operation is related to GetObjectTagging:

", + "GetObjectTagging": "

Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET operation returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

By default, the bucket owner has this permission and can grant this permission to others.

For information about the Amazon S3 object tagging feature, see Object Tagging.

The following operation is related to GetObjectTagging:

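This release adds a RequestPayer member to GetObjectTaggingRequest (see the api-2.json hunk above). A minimal sketch of reading tags from a Requester Pays bucket with the v1 SDK; the region, bucket, and key are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		// New in v1.37.20: acknowledge Requester Pays charges on the tagging request.
		RequestPayer: aws.String(s3.RequestPayerRequester),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}
```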
", "GetObjectTorrent": "

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.

You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

To use GET, you must have READ access to the object.

This action is not supported by Amazon S3 on Outposts.

The following operation is related to GetObjectTorrent:

", "GetPublicAccessBlock": "

Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

The following operations are related to GetPublicAccessBlock:

", - "HeadBucket": "

This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation might return responses such as 404 Not Found and 403 Forbidden.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

", - "HeadObject": "

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

The following operation is related to HeadObject:

", + "HeadBucket": "

This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it.

If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A message body is not included, so you cannot determine the exception beyond these error codes.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

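Because a HEAD response has no body, callers typically branch on the HTTP status of the request failure. A hedged v1 SDK sketch; the region and bucket name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("example-bucket")})
	if err == nil {
		fmt.Println("bucket exists and is accessible")
		return
	}
	// Only the status code distinguishes the failure cases.
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		switch reqErr.StatusCode() {
		case 404:
			fmt.Println("bucket does not exist")
		case 403:
			fmt.Println("access denied")
		default:
			log.Fatal(err)
		}
		return
	}
	log.Fatal(err)
}
```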
", + "HeadObject": "

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

The following operation is related to HeadObject:

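For the SSE-C case above, a hedged sketch of a HEAD request that supplies the customer key headers; the region, bucket, key, and key material are placeholders, and the field names assume the generated HeadObjectInput members:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		// The object was stored with SSE-C, so the same key must accompany the HEAD request.
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String("0123456789abcdef0123456789abcdef"), // placeholder 32-byte key
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("content length:", aws.Int64Value(out.ContentLength), "etag:", aws.StringValue(out.ETag))
}
```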
", "ListBucketAnalyticsConfigurations": "

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to ListBucketAnalyticsConfigurations:

", "ListBucketIntelligentTieringConfigurations": "

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to ListBucketIntelligentTieringConfigurations include:

", "ListBucketInventoryConfigurations": "

Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory

The following operations are related to ListBucketInventoryConfigurations:

", @@ -64,14 +64,14 @@ "ListMultipartUploads": "

This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.

In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to ListMultipartUploads:

", "ListObjectVersions": "

Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

To use this operation, you must have READ access to the bucket.

This action is not supported by Amazon S3 on Outposts.

The following operations are related to ListObjectVersions:

", "ListObjects": "

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

", - "ListObjectsV2": "

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

To use this operation, you must have READ access to the bucket.

To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

To get a list of your buckets, see ListBuckets.

The following operations are related to ListObjectsV2:

", + "ListObjectsV2": "

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in ascending order of their key names.

To use this operation, you must have READ access to the bucket.

To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

To get a list of your buckets, see ListBuckets.

The following operations are related to ListObjectsV2:

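A small pagination sketch for the listing behavior described above, using the SDK's ListObjectsV2Pages helper; the region, bucket, and prefix are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	input := &s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"),
		Prefix: aws.String("notes/"),
	}
	// Each page holds at most MaxKeys (default 1,000) keys; the helper follows
	// NextContinuationToken until IsTruncated is false.
	err := svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // keep paginating
	})
	if err != nil {
		log.Fatal(err)
	}
}
```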
", "ListParts": "

Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to ListParts:

", "PutBucketAccelerateConfiguration": "

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.

To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The Transfer Acceleration state of a bucket can be set to one of the following two values:

The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

For more information about transfer acceleration, see Transfer Acceleration.

The following operations are related to PutBucketAccelerateConfiguration:

", "PutBucketAcl": "

Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

You can use one of the following two ways to set a bucket's permissions:

You cannot specify access permission using both the body and the request headers.

Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

Access Permissions

You can set access permissions using one of the following methods:

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

Related Resources

", "PutBucketAnalyticsConfiguration": "

Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.

You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.

You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

Special Errors

Related Resources

", "PutBucketCors": "

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources

", "PutBucketEncryption": "

This operation uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", - "PutBucketIntelligentTieringConfiguration": "

Puts a S3 Intelligent-Tiering configuration to the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to PutBucketIntelligentTieringConfiguration include:

", + "PutBucketIntelligentTieringConfiguration": "

Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to PutBucketIntelligentTieringConfiguration include:

You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier.

Special Errors

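A hedged sketch of putting a single Intelligent-Tiering configuration as described above; the region, bucket, configuration ID, and the 90/180-day tiers are illustrative values, and the constant names assume the v1 SDK's generated enums:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("archive-after-90-days"),
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("archive-after-90-days"),
			Status: aws.String(s3.IntelligentTieringStatusEnabled),
			// Move objects that have not been accessed to the archive tiers.
			Tierings: []*s3.Tiering{
				{AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess), Days: aws.Int64(90)},
				{AccessTier: aws.String(s3.IntelligentTieringAccessTierDeepArchiveAccess), Days: aws.Int64(180)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```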
", "PutBucketInventoryConfiguration": "

This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Special Errors

Related Resources

", "PutBucketLifecycle": "

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", "PutBucketLifecycleConfiguration": "

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management.

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

Rules

You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

Permissions

By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

The following are related to PutBucketLifecycleConfiguration:

", @@ -91,7 +91,7 @@ "PutObjectLegalHold": "

Applies a Legal Hold configuration to the specified object.

This action is not supported by Amazon S3 on Outposts.

Related Resources

", "PutObjectLockConfiguration": "

Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.

DefaultRetention requires either Days or Years. You can't specify both at the same time.

Related Resources

", "PutObjectRetention": "

Places an Object Retention configuration on an object.

This action is not supported by Amazon S3 on Outposts.

Related Resources

", - "PutObjectTagging": "

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources

", + "PutObjectTagging": "

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources

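This release also adds RequestPayer to PutObjectTaggingRequest. A minimal sketch that replaces an object's tag-set on a Requester Pays bucket; the region, bucket, key, and tag are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// PutObjectTagging replaces the entire tag-set (at most 10 tags per object).
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("blue")},
			},
		},
		// New in v1.37.20: acknowledge Requester Pays charges on the tagging request.
		RequestPayer: aws.String(s3.RequestPayerRequester),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```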
", "PutPublicAccessBlock": "

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

Related Resources

", "RestoreObject": "

Restores an archived copy of an object back into Amazon S3.

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring objects

Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources

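A hedged sketch of the basic restore-and-poll flow described above for an object in the S3 Glacier storage class; the region, bucket, key, restore period, and retrieval tier are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Ask for a temporary copy to be available for 7 days, using Standard retrieval.
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("archived-key"),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(7),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll the x-amz-restore status with a HEAD request.
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("archived-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("restore status:", aws.StringValue(head.Restore))
}
```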
", "SelectObjectContent": "

This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This action is not supported by Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response .

GetObject Support

The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this operation, see List of SELECT Object Content Error Codes

Related Resources

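A hedged sketch of consuming the chunked event stream described above when selecting from a CSV object; the region, bucket, key, and SQL expression are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),
		Key:            aws.String("data.csv"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		Expression:     aws.String("SELECT s.* FROM S3Object s LIMIT 10"),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// The response arrives as a series of events; Records events carry the payload bytes.
	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			os.Stdout.Write(records.Payload)
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Println()
}
```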
", @@ -671,8 +671,8 @@ "refs": { "ListMultipartUploadsOutput$CommonPrefixes": "

If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a CommonPrefixes element. The distinct key prefixes are returned in the Prefix child element.

", "ListObjectVersionsOutput$CommonPrefixes": "

All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

", - "ListObjectsOutput$CommonPrefixes": "

All of the keys rolled up in a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

", - "ListObjectsV2Output$CommonPrefixes": "

All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

" + "ListObjectsOutput$CommonPrefixes": "

All of the keys (up to 1,000) rolled up in a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

", + "ListObjectsV2Output$CommonPrefixes": "

All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

" } }, "CompleteMultipartUploadOutput": { @@ -772,7 +772,7 @@ "PutBucketOwnershipControlsRequest$ContentMD5": "

The MD5 hash of the OwnershipControls request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "PutBucketPolicyRequest$ContentMD5": "

The MD5 hash of the request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "PutBucketReplicationRequest$ContentMD5": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", - "PutBucketRequestPaymentRequest$ContentMD5": "

>The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", + "PutBucketRequestPaymentRequest$ContentMD5": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "PutBucketTaggingRequest$ContentMD5": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "PutBucketVersioningRequest$ContentMD5": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "PutBucketWebsiteRequest$ContentMD5": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", @@ -1157,7 +1157,7 @@ "refs": { "CompleteMultipartUploadOutput$ETag": "

Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits.

", "CompletedPart$ETag": "

Entity tag returned when the part was uploaded.

", - "CopyObjectResult$ETag": "

Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETag is identical for a successfully copied object.

", + "CopyObjectResult$ETag": "

Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETags are identical for a successfully copied non-multipart object.

", "CopyPartResult$ETag": "

Entity tag of the object.

", "GetObjectOutput$ETag": "

An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.

", "HeadObjectOutput$ETag": "

An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.

", @@ -2037,7 +2037,7 @@ "KeyCount": { "base": null, "refs": { - "ListObjectsV2Output$KeyCount": "

KeyCount is the number of keys returned with this request. KeyCount will always be less than equals to MaxKeys field. Say you ask for 50 keys, your result will include less than equals 50 keys

" + "ListObjectsV2Output$KeyCount": "

KeyCount is the number of keys returned with this request. KeyCount will always be less than or equal to the MaxKeys field. For example, if you ask for 50 keys, your result will include 50 keys or fewer.

" } }, "KeyMarker": { @@ -2052,7 +2052,7 @@ "KeyPrefixEquals": { "base": null, "refs": { - "Condition$KeyPrefixEquals": "

The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.

" + "Condition$KeyPrefixEquals": "

The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect requests for all pages with the prefix docs/, the key prefix will be docs/, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "LambdaFunctionArn": { @@ -2076,12 +2076,12 @@ "LastModified": { "base": null, "refs": { - "CopyObjectResult$LastModified": "

Returns the date that the object was last modified.

", + "CopyObjectResult$LastModified": "

Creation date of the object.

", "CopyPartResult$LastModified": "

Date and time at which the object was uploaded.

", "DeleteMarkerEntry$LastModified": "

Date and time the object was last modified.

", - "GetObjectOutput$LastModified": "

Last modified date of the object

", - "HeadObjectOutput$LastModified": "

Last modified date of the object

", - "Object$LastModified": "

The date the Object was Last Modified

", + "GetObjectOutput$LastModified": "

Creation date of the object.

", + "HeadObjectOutput$LastModified": "

Creation date of the object.

", + "Object$LastModified": "

Creation date of the object.

", "ObjectVersion$LastModified": "

Date and time the object was last modified.

", "Part$LastModified": "

Date and time at which the part was uploaded.

" } @@ -2575,10 +2575,10 @@ "CreateMultipartUploadRequest$Key": "

Object key for which the multipart upload is to be initiated.

", "DeleteMarkerEntry$Key": "

The object key.

", "DeleteObjectRequest$Key": "

Key name of the object to delete.

", - "DeleteObjectTaggingRequest$Key": "

Name of the object key.

", + "DeleteObjectTaggingRequest$Key": "

The key that identifies the object in the bucket from which to remove all tags.

", "DeletedObject$Key": "

The name of the deleted object.

", "Error$Key": "

The error key.

", - "ErrorDocument$Key": "

The object key name to use when a 4XX class error occurs.

", + "ErrorDocument$Key": "

The object key name to use when a 4XX class error occurs.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", "GetObjectAclRequest$Key": "

The key of the object for which to get the ACL information.

", "GetObjectLegalHoldRequest$Key": "

The key name for the object whose Legal Hold status you want to retrieve.

", "GetObjectRequest$Key": "

Key of the object to get.

", @@ -2590,7 +2590,7 @@ "ListPartsRequest$Key": "

Object key for which the multipart upload was initiated.

", "MultipartUpload$Key": "

Key of the object for which the multipart upload was initiated.

", "Object$Key": "

The name that you assign to an object. You use the object key to retrieve the object.

", - "ObjectIdentifier$Key": "

Key name of the object to delete.

", + "ObjectIdentifier$Key": "

Key name of the object.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
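A minimal sketch (not part of this change-set) of how ObjectIdentifier keys are passed to DeleteObjects with aws-sdk-go v1, mirroring the objectkey1/objectkey2 example updated elsewhere in this diff; the bucket name is a placeholder and svc is an *s3.S3 client as in the earlier ListObjectsV2 sketch.

// deleteTwoObjects deletes two objects by key only (no VersionId), so in a
// versioned bucket S3 adds delete markers rather than removing versions.
func deleteTwoObjects(svc *s3.S3) (*s3.DeleteObjectsOutput, error) {
    return svc.DeleteObjects(&s3.DeleteObjectsInput{
        Bucket: aws.String("examplebucket"), // placeholder bucket name
        Delete: &s3.Delete{
            Objects: []*s3.ObjectIdentifier{
                {Key: aws.String("objectkey1")},
                {Key: aws.String("objectkey2")},
            },
            Quiet: aws.Bool(false),
        },
    })
}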

", "ObjectVersion$Key": "

The object key.

", "PutObjectAclRequest$Key": "

Key for which the PUT operation was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "PutObjectLegalHoldRequest$Key": "

The key name for the object that you want to place a Legal Hold on.

", @@ -2891,12 +2891,12 @@ "AnalyticsS3BucketDestination$Prefix": "

The prefix to use when exporting data. The prefix is prepended to all results.

", "CommonPrefix$Prefix": "

Container for the specified common prefix.

", "IntelligentTieringAndOperator$Prefix": "

An object key name prefix that identifies the subset of objects to which the configuration applies.

", - "IntelligentTieringFilter$Prefix": "

An object key name prefix that identifies the subset of objects to which the rule applies.

", + "IntelligentTieringFilter$Prefix": "

An object key name prefix that identifies the subset of objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", "InventoryFilter$Prefix": "

The prefix that an object must have to be included in the inventory results.

", "InventoryS3BucketDestination$Prefix": "

The prefix that is prepended to all inventory results.

", - "LifecycleRule$Prefix": "

Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.

", + "LifecycleRule$Prefix": "

Prefix identifying one or more objects to which the rule applies. This is no longer used; use Filter instead.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
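Since the description above points to Filter as the replacement for the deprecated rule-level Prefix, here is a minimal sketch (not part of this change-set) of a lifecycle rule written with Filter, again assuming an *s3.S3 client named svc; bucket name, rule ID, prefix, and expiration period are placeholders.

// putLogExpiryRule installs one lifecycle rule that expires objects under
// logs/ after 30 days, scoping the rule with Filter rather than Prefix.
func putLogExpiryRule(svc *s3.S3) error {
    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
        Bucket: aws.String("examplebucket"), // placeholder bucket name
        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
            Rules: []*s3.LifecycleRule{
                {
                    ID:         aws.String("expire-logs"),
                    Status:     aws.String("Enabled"),
                    Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
                    Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
                },
            },
        },
    })
    return err
}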

", "LifecycleRuleAndOperator$Prefix": "

Prefix identifying one or more objects to which the rule applies.

", - "LifecycleRuleFilter$Prefix": "

Prefix identifying one or more objects to which the rule applies.

", + "LifecycleRuleFilter$Prefix": "

Prefix identifying one or more objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", "ListMultipartUploadsOutput$Prefix": "

When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.

", "ListMultipartUploadsRequest$Prefix": "

Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different grouping of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.)

", "ListObjectVersionsOutput$Prefix": "

Selects objects that start with the value supplied by this parameter.

", @@ -2907,10 +2907,10 @@ "ListObjectsV2Request$Prefix": "

Limits the response to keys that begin with the specified prefix.

", "MetricsAndOperator$Prefix": "

The prefix used when evaluating an AND predicate.

", "MetricsFilter$Prefix": "

The prefix used when evaluating a metrics filter.

", - "ReplicationRule$Prefix": "

An object key name prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

", + "ReplicationRule$Prefix": "

An object key name prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", "ReplicationRuleAndOperator$Prefix": "

An object key name prefix that identifies the subset of objects to which the rule applies.

", - "ReplicationRuleFilter$Prefix": "

An object key name prefix that identifies the subset of objects to which the rule applies.

", - "Rule$Prefix": "

Object key prefix that identifies one or more objects to which this rule applies.

" + "ReplicationRuleFilter$Prefix": "

An object key name prefix that identifies the subset of objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", + "Rule$Prefix": "

Object key prefix that identifies one or more objects to which this rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "Priority": { @@ -3198,13 +3198,13 @@ "ReplaceKeyPrefixWith": { "base": null, "refs": { - "Redirect$ReplaceKeyPrefixWith": "

The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.

" + "Redirect$ReplaceKeyPrefixWith": "

The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
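Read together with Condition$KeyPrefixEquals above, this maps onto a website routing rule. A minimal sketch (not part of this change-set) with aws-sdk-go v1, assuming an *s3.S3 client named svc as in the earlier sketches; the bucket name, index and error documents, and prefixes are placeholders that follow the docs/ to documents/ example in the description.

// putDocsRedirectRule configures the bucket website so that requests whose
// key starts with docs/ are redirected to the same key under documents/.
func putDocsRedirectRule(svc *s3.S3) error {
    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
        Bucket: aws.String("examplebucket"), // placeholder bucket name
        WebsiteConfiguration: &s3.WebsiteConfiguration{
            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
            RoutingRules: []*s3.RoutingRule{
                {
                    Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
                    Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
                },
            },
        },
    })
    return err
}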

" } }, "ReplaceKeyWith": { "base": null, "refs": { - "Redirect$ReplaceKeyWith": "

The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the siblings is present. Can be present only if ReplaceKeyPrefixWith is not provided.

" + "Redirect$ReplaceKeyWith": "

The specific object key to use in the redirect request. For example, redirect requests to error.html. Not required if one of the siblings is present. Can be present only if ReplaceKeyPrefixWith is not provided.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "ReplicaKmsKeyID": { @@ -3325,6 +3325,7 @@ "GetObjectLegalHoldRequest$RequestPayer": null, "GetObjectRequest$RequestPayer": null, "GetObjectRetentionRequest$RequestPayer": null, + "GetObjectTaggingRequest$RequestPayer": null, "GetObjectTorrentRequest$RequestPayer": null, "HeadObjectRequest$RequestPayer": null, "ListObjectsRequest$RequestPayer": "

Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.
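Because this change-set also adds RequestPayer to GetObjectTaggingRequest and PutObjectTaggingRequest (see the model additions above), here is a minimal sketch of fetching an object's tag set from a Requester Pays bucket with aws-sdk-go v1; the bucket and key are placeholders and svc is an *s3.S3 client as in the earlier sketches.

// getTagsRequesterPays retrieves the tag set while acknowledging that the
// requester, not the bucket owner, pays for the request.
func getTagsRequesterPays(svc *s3.S3) (*s3.GetObjectTaggingOutput, error) {
    return svc.GetObjectTagging(&s3.GetObjectTaggingInput{
        Bucket:       aws.String("examplebucket"), // placeholder bucket name
        Key:          aws.String("HappyFace.jpg"), // placeholder key
        RequestPayer: aws.String(s3.RequestPayerRequester),
    })
}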

", @@ -3335,6 +3336,7 @@ "PutObjectLockConfigurationRequest$RequestPayer": null, "PutObjectRequest$RequestPayer": null, "PutObjectRetentionRequest$RequestPayer": null, + "PutObjectTaggingRequest$RequestPayer": null, "RestoreObjectRequest$RequestPayer": null, "UploadPartCopyRequest$RequestPayer": null, "UploadPartRequest$RequestPayer": null @@ -3475,7 +3477,7 @@ "CreateMultipartUploadOutput$SSECustomerAlgorithm": "

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", "CreateMultipartUploadRequest$SSECustomerAlgorithm": "

Specifies the algorithm to use to when encrypting the object (for example, AES256).

", "GetObjectOutput$SSECustomerAlgorithm": "

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", - "GetObjectRequest$SSECustomerAlgorithm": "

Specifies the algorithm to use to when encrypting the object (for example, AES256).

", + "GetObjectRequest$SSECustomerAlgorithm": "

Specifies the algorithm to use when decrypting the object (for example, AES256).

", "HeadObjectOutput$SSECustomerAlgorithm": "

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", "HeadObjectRequest$SSECustomerAlgorithm": "

Specifies the algorithm to use to when encrypting the object (for example, AES256).

", "PutObjectOutput$SSECustomerAlgorithm": "

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", @@ -3492,7 +3494,7 @@ "refs": { "CopyObjectRequest$SSECustomerKey": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", "CreateMultipartUploadRequest$SSECustomerKey": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", - "GetObjectRequest$SSECustomerKey": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", + "GetObjectRequest$SSECustomerKey": "

Specifies the customer-provided encryption key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.
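A minimal sketch (not part of this change-set) of supplying the same customer key again on retrieval, assuming an *s3.S3 client named svc and the original 256-bit key held in rawKey; it additionally needs the crypto/md5 and encoding/base64 packages, and the bucket and key names are placeholders.

// getWithSSEC retrieves an object stored with SSE-C. rawKey must be the exact
// 32-byte key used on upload; the key MD5 is computed here explicitly so the
// request carries the checksum S3 expects.
func getWithSSEC(svc *s3.S3, rawKey []byte) (*s3.GetObjectOutput, error) {
    sum := md5.Sum(rawKey) // MD5 of the raw key
    return svc.GetObject(&s3.GetObjectInput{
        Bucket:               aws.String("examplebucket"), // placeholder bucket name
        Key:                  aws.String("exampleobject"), // placeholder key
        SSECustomerAlgorithm: aws.String("AES256"),
        SSECustomerKey:       aws.String(string(rawKey)),
        SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
    })
}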

", "HeadObjectRequest$SSECustomerKey": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", "PutObjectRequest$SSECustomerKey": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", "SelectObjectContentRequest$SSECustomerKey": "

The SSE Customer Key. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.

", @@ -3729,7 +3731,7 @@ "Suffix": { "base": null, "refs": { - "IndexDocument$Suffix": "

A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.

" + "IndexDocument$Suffix": "

A suffix that is appended to a request that is for a directory on the website endpoint (for example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "Tag": { diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 661b5de3542..1720486a42d 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -334,12 +334,10 @@ "Delete": { "Objects": [ { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "Key": "objectkey2" } ], "Quiet": false @@ -348,12 +346,14 @@ "output": { "Deleted": [ { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", + "Key": "objectkey2" } ] }, @@ -363,9 +363,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", - "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", - "title": "To delete multiple object versions from a versioned bucket" + "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", + "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", + "title": "To delete multiple objects from a versioned bucket" }, { "input": { @@ -373,10 +373,12 @@ "Delete": { "Objects": [ { - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" }, { - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" } ], "Quiet": false @@ -385,14 +387,12 @@ "output": { "Deleted": [ { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" }, { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" } ] }, @@ -402,9 +402,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", - "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", - "title": "To delete multiple objects from a versioned bucket" + "description": "The following example deletes objects from a bucket. The request specifies object versions. 
S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", + "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", + "title": "To delete multiple object versions from a versioned bucket" } ], "GetBucketCors": [ @@ -728,18 +728,17 @@ { "input": { "Bucket": "examplebucket", - "Key": "SampleFile.txt", - "Range": "bytes=0-9" + "Key": "HappyFace.jpg" }, "output": { "AcceptRanges": "bytes", - "ContentLength": "10", - "ContentRange": "bytes 0-9/43", - "ContentType": "text/plain", - "ETag": "\"0d94420ffd0bc68cd3d152506b97a9cc\"", - "LastModified": "Thu, 09 Oct 2014 22:57:28 GMT", + "ContentLength": "3191", + "ContentType": "image/jpeg", + "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", + "LastModified": "Thu, 15 Dec 2016 01:19:41 GMT", "Metadata": { }, + "TagCount": 2, "VersionId": "null" }, "comments": { @@ -748,24 +747,25 @@ "output": { } }, - "description": "The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a specific byte range.", - "id": "to-retrieve-a-byte-range-of-an-object--1481832674603", - "title": "To retrieve a byte range of an object " + "description": "The following example retrieves an object for an S3 bucket.", + "id": "to-retrieve-an-object-1481827837012", + "title": "To retrieve an object" }, { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "SampleFile.txt", + "Range": "bytes=0-9" }, "output": { "AcceptRanges": "bytes", - "ContentLength": "3191", - "ContentType": "image/jpeg", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "LastModified": "Thu, 15 Dec 2016 01:19:41 GMT", + "ContentLength": "10", + "ContentRange": "bytes 0-9/43", + "ContentType": "text/plain", + "ETag": "\"0d94420ffd0bc68cd3d152506b97a9cc\"", + "LastModified": "Thu, 09 Oct 2014 22:57:28 GMT", "Metadata": { }, - "TagCount": 2, "VersionId": "null" }, "comments": { @@ -774,9 +774,9 @@ "output": { } }, - "description": "The following example retrieves an object for an S3 bucket.", - "id": "to-retrieve-an-object-1481827837012", - "title": "To retrieve an object" + "description": "The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a specific byte range.", + "id": "to-retrieve-a-byte-range-of-an-object--1481832674603", + "title": "To retrieve a byte range of an object " } ], "GetObjectAcl": [ @@ -840,20 +840,17 @@ { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "exampleobject", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "output": { "TagSet": [ { - "Key": "Key4", - "Value": "Value4" - }, - { - "Key": "Key3", - "Value": "Value3" + "Key": "Key1", + "Value": "Value1" } ], - "VersionId": "null" + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "comments": { "input": { @@ -861,24 +858,27 @@ "output": { } }, - "description": "The following example retrieves tag set of an object.", - "id": "to-retrieve-tag-set-of-an-object-1481833847896", - "title": "To retrieve tag set of an object" + "description": "The following example retrieves tag set of an object. 
The request specifies object version.", + "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", + "title": "To retrieve tag set of a specific object version" }, { "input": { "Bucket": "examplebucket", - "Key": "exampleobject", - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "Key": "HappyFace.jpg" }, "output": { "TagSet": [ { - "Key": "Key1", - "Value": "Value1" + "Key": "Key4", + "Value": "Value4" + }, + { + "Key": "Key3", + "Value": "Value3" } ], - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "VersionId": "null" }, "comments": { "input": { @@ -886,9 +886,9 @@ "output": { } }, - "description": "The following example retrieves tag set of an object. The request specifies object version.", - "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", - "title": "To retrieve tag set of a specific object version" + "description": "The following example retrieves tag set of an object.", + "id": "to-retrieve-tag-set-of-an-object-1481833847896", + "title": "To retrieve tag set of an object" } ], "GetObjectTorrent": [ @@ -1570,13 +1570,14 @@ "Body": "filetoupload", "Bucket": "examplebucket", "Key": "exampleobject", - "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" + "Metadata": { + "metadata1": "value1", + "metadata2": "value2" + } }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" + "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" }, "comments": { "input": { @@ -1584,19 +1585,22 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", - "title": "To upload an object and specify server-side encryption and object tags" + "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", + "title": "To upload object and specify user-defined metadata" }, { "input": { "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "objectkey" + "Key": "exampleobject", + "ServerSideEncryption": "AES256", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" + "ServerSideEncryption": "AES256", + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" }, "comments": { "input": { @@ -1604,20 +1608,19 @@ "output": { } }, - "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-create-an-object-1483147613675", - "title": "To create an object." + "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. 
If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", + "title": "To upload an object and specify server-side encryption and object tags" }, { "input": { - "Body": "c:\\HappyFace.jpg", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "Tagging": "key1=value1&key2=value2" + "Key": "HappyFace.jpg" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" }, "comments": { "input": { @@ -1625,22 +1628,19 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", - "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", - "title": "To upload an object and specify optional tags" + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "ServerSideEncryption": "AES256", - "StorageClass": "STANDARD_IA" + "Key": "objectkey" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" + "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" }, "comments": { "input": { @@ -1648,23 +1648,20 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", - "id": "to-upload-an-object-(specify-optional-headers)", - "title": "To upload an object (specify optional headers)" + "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-create-an-object-1483147613675", + "title": "To create an object." }, { "input": { + "ACL": "authenticated-read", "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "exampleobject", - "Metadata": { - "metadata1": "value1", - "metadata2": "value2" - } + "Key": "exampleobject" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" + "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" }, "comments": { "input": { @@ -1672,20 +1669,20 @@ "output": { } }, - "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", - "title": "To upload object and specify user-defined metadata" + "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", + "title": "To upload an object and specify canned ACL." 
}, { "input": { - "ACL": "authenticated-read", - "Body": "filetoupload", + "Body": "c:\\HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject" + "Key": "HappyFace.jpg", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" + "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" }, "comments": { "input": { @@ -1693,19 +1690,22 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", - "title": "To upload an object and specify canned ACL." + "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", + "title": "To upload an object and specify optional tags" }, { "input": { "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "HappyFace.jpg", + "ServerSideEncryption": "AES256", + "StorageClass": "STANDARD_IA" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + "ServerSideEncryption": "AES256", + "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" }, "comments": { "input": { @@ -1713,9 +1713,9 @@ "output": { } }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" + "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", + "id": "to-upload-an-object-(specify-optional-headers)", + "title": "To upload an object (specify optional headers)" } ], "PutObjectAcl": [ diff --git a/models/apis/sso-admin/2020-07-20/api-2.json b/models/apis/sso-admin/2020-07-20/api-2.json index f5c043d835a..0f598bca0bb 100644 --- a/models/apis/sso-admin/2020-07-20/api-2.json +++ b/models/apis/sso-admin/2020-07-20/api-2.json @@ -1409,7 +1409,7 @@ "Token":{ "type":"string", "max":2048, - "pattern":"^[-a-zA-Z0-9+=/]*" + "pattern":"^[-a-zA-Z0-9+=/_]*" }, "UUId":{ "type":"string", diff --git a/service/eks/api.go b/service/eks/api.go index 0ae9260e51d..aaa7b9e13d3 100644 --- a/service/eks/api.go +++ b/service/eks/api.go @@ -13,6 +13,110 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opAssociateEncryptionConfig = "AssociateEncryptionConfig" + +// AssociateEncryptionConfigRequest generates a "aws/request.Request" representing the +// client's request for the AssociateEncryptionConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateEncryptionConfig for more information on using the AssociateEncryptionConfig +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateEncryptionConfigRequest method. +// req, resp := client.AssociateEncryptionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/AssociateEncryptionConfig +func (c *EKS) AssociateEncryptionConfigRequest(input *AssociateEncryptionConfigInput) (req *request.Request, output *AssociateEncryptionConfigOutput) { + op := &request.Operation{ + Name: opAssociateEncryptionConfig, + HTTPMethod: "POST", + HTTPPath: "/clusters/{name}/encryption-config/associate", + } + + if input == nil { + input = &AssociateEncryptionConfigInput{} + } + + output = &AssociateEncryptionConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateEncryptionConfig API operation for Amazon Elastic Kubernetes Service. +// +// Associate encryption configuration to an existing cluster. +// +// You can use this API to enable encryption on existing clusters which do not +// have encryption already enabled. This allows you to implement a defense-in-depth +// security strategy without migrating applications to new EKS clusters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's +// API operation AssociateEncryptionConfig for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClientException +// These errors are usually caused by a client action. Actions can include using +// an action or resource on behalf of a user that doesn't have permissions to +// use the action or resource or specifying an identifier that is not valid. +// +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * ResourceInUseException +// The specified resource is in use. +// +// * ResourceNotFoundException +// The specified resource could not be found. You can view your available clusters +// with ListClusters. You can view your available managed node groups with ListNodegroups. +// Amazon EKS clusters and node groups are Region-specific. +// +// * InvalidRequestException +// The request is invalid given the state of the cluster. Check the state of +// the cluster and the associated operations. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/AssociateEncryptionConfig +func (c *EKS) AssociateEncryptionConfig(input *AssociateEncryptionConfigInput) (*AssociateEncryptionConfigOutput, error) { + req, out := c.AssociateEncryptionConfigRequest(input) + return out, req.Send() +} + +// AssociateEncryptionConfigWithContext is the same as AssociateEncryptionConfig with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateEncryptionConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EKS) AssociateEncryptionConfigWithContext(ctx aws.Context, input *AssociateEncryptionConfigInput, opts ...request.Option) (*AssociateEncryptionConfigOutput, error) { + req, out := c.AssociateEncryptionConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAssociateIdentityProviderConfig = "AssociateIdentityProviderConfig" // AssociateIdentityProviderConfigRequest generates a "aws/request.Request" representing the @@ -289,26 +393,9 @@ func (c *EKS) CreateClusterRequest(input *CreateClusterInput) (req *request.Requ // plane via the Kubernetes API server endpoint and a certificate file that // is created for your cluster. // -// You can use the endpointPublicAccess and endpointPrivateAccess parameters -// to enable or disable public and private access to your cluster's Kubernetes -// API server endpoint. By default, public access is enabled, and private access -// is disabled. For more information, see Amazon EKS Cluster Endpoint Access -// Control (https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) -// in the Amazon EKS User Guide . -// -// You can use the logging parameter to enable or disable exporting the Kubernetes -// control plane logs for your cluster to CloudWatch Logs. By default, cluster -// control plane logs aren't exported to CloudWatch Logs. For more information, -// see Amazon EKS Cluster Control Plane Logs (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) -// in the Amazon EKS User Guide . -// -// CloudWatch Logs ingestion, archive storage, and data scanning rates apply -// to exported control plane logs. For more information, see Amazon CloudWatch -// Pricing (http://aws.amazon.com/cloudwatch/pricing/). -// -// Cluster creation typically takes between 10 and 15 minutes. After you create -// an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate -// with the API server and launch nodes into your cluster. For more information, +// Cluster creation typically takes several minutes. After you create an Amazon +// EKS cluster, you must configure your Kubernetes tooling to communicate with +// the API server and launch nodes into your cluster. For more information, // see Managing Cluster Authentication (https://docs.aws.amazon.com/eks/latest/userguide/managing-auth.html) // and Launching Amazon EKS nodes (https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html) // in the Amazon EKS User Guide. @@ -3856,6 +3943,93 @@ func (s *AddonVersionInfo) SetCompatibilities(v []*Compatibility) *AddonVersionI return s } +type AssociateEncryptionConfigInput struct { + _ struct{} `type:"structure"` + + // The client request token you are using with the encryption configuration. + ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` + + // The name of the cluster that you are associating with encryption configuration. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // The configuration you are using for encryption. 
+ // + // EncryptionConfig is a required field + EncryptionConfig []*EncryptionConfig `locationName:"encryptionConfig" type:"list" required:"true"` +} + +// String returns the string representation +func (s AssociateEncryptionConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEncryptionConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateEncryptionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateEncryptionConfigInput"} + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + if s.ClusterName != nil && len(*s.ClusterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterName", 1)) + } + if s.EncryptionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionConfig")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *AssociateEncryptionConfigInput) SetClientRequestToken(v string) *AssociateEncryptionConfigInput { + s.ClientRequestToken = &v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *AssociateEncryptionConfigInput) SetClusterName(v string) *AssociateEncryptionConfigInput { + s.ClusterName = &v + return s +} + +// SetEncryptionConfig sets the EncryptionConfig field's value. +func (s *AssociateEncryptionConfigInput) SetEncryptionConfig(v []*EncryptionConfig) *AssociateEncryptionConfigInput { + s.EncryptionConfig = v + return s +} + +type AssociateEncryptionConfigOutput struct { + _ struct{} `type:"structure"` + + // An object representing an asynchronous update. + Update *Update `locationName:"update" type:"structure"` +} + +// String returns the string representation +func (s AssociateEncryptionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEncryptionConfigOutput) GoString() string { + return s.String() +} + +// SetUpdate sets the Update field's value. 
+func (s *AssociateEncryptionConfigOutput) SetUpdate(v *Update) *AssociateEncryptionConfigOutput { + s.Update = v + return s +} + type AssociateIdentityProviderConfigInput struct { _ struct{} `type:"structure"` @@ -10238,6 +10412,9 @@ const ( // UpdateParamTypeIdentityProviderConfig is a UpdateParamType enum value UpdateParamTypeIdentityProviderConfig = "IdentityProviderConfig" + // UpdateParamTypeEncryptionConfig is a UpdateParamType enum value + UpdateParamTypeEncryptionConfig = "EncryptionConfig" + // UpdateParamTypeAddonVersion is a UpdateParamType enum value UpdateParamTypeAddonVersion = "AddonVersion" @@ -10264,6 +10441,7 @@ func UpdateParamType_Values() []string { UpdateParamTypeReleaseVersion, UpdateParamTypePublicAccessCidrs, UpdateParamTypeIdentityProviderConfig, + UpdateParamTypeEncryptionConfig, UpdateParamTypeAddonVersion, UpdateParamTypeServiceAccountRoleArn, UpdateParamTypeResolveConflicts, @@ -10313,6 +10491,9 @@ const ( // UpdateTypeDisassociateIdentityProviderConfig is a UpdateType enum value UpdateTypeDisassociateIdentityProviderConfig = "DisassociateIdentityProviderConfig" + // UpdateTypeAssociateEncryptionConfig is a UpdateType enum value + UpdateTypeAssociateEncryptionConfig = "AssociateEncryptionConfig" + // UpdateTypeAddonUpdate is a UpdateType enum value UpdateTypeAddonUpdate = "AddonUpdate" ) @@ -10326,6 +10507,7 @@ func UpdateType_Values() []string { UpdateTypeConfigUpdate, UpdateTypeAssociateIdentityProviderConfig, UpdateTypeDisassociateIdentityProviderConfig, + UpdateTypeAssociateEncryptionConfig, UpdateTypeAddonUpdate, } } diff --git a/service/eks/eksiface/interface.go b/service/eks/eksiface/interface.go index 003202260e8..8f258a93024 100644 --- a/service/eks/eksiface/interface.go +++ b/service/eks/eksiface/interface.go @@ -26,7 +26,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon Elastic Kubernetes Service. // func myFunc(svc eksiface.EKSAPI) bool { -// // Make svc.AssociateIdentityProviderConfig request +// // Make svc.AssociateEncryptionConfig request // } // // func main() { @@ -42,7 +42,7 @@ import ( // type mockEKSClient struct { // eksiface.EKSAPI // } -// func (m *mockEKSClient) AssociateIdentityProviderConfig(input *eks.AssociateIdentityProviderConfigInput) (*eks.AssociateIdentityProviderConfigOutput, error) { +// func (m *mockEKSClient) AssociateEncryptionConfig(input *eks.AssociateEncryptionConfigInput) (*eks.AssociateEncryptionConfigOutput, error) { // // mock response/functionality // } // @@ -60,6 +60,10 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
type EKSAPI interface { + AssociateEncryptionConfig(*eks.AssociateEncryptionConfigInput) (*eks.AssociateEncryptionConfigOutput, error) + AssociateEncryptionConfigWithContext(aws.Context, *eks.AssociateEncryptionConfigInput, ...request.Option) (*eks.AssociateEncryptionConfigOutput, error) + AssociateEncryptionConfigRequest(*eks.AssociateEncryptionConfigInput) (*request.Request, *eks.AssociateEncryptionConfigOutput) + AssociateIdentityProviderConfig(*eks.AssociateIdentityProviderConfigInput) (*eks.AssociateIdentityProviderConfigOutput, error) AssociateIdentityProviderConfigWithContext(aws.Context, *eks.AssociateIdentityProviderConfigInput, ...request.Option) (*eks.AssociateIdentityProviderConfigOutput, error) AssociateIdentityProviderConfigRequest(*eks.AssociateIdentityProviderConfigInput) (*request.Request, *eks.AssociateIdentityProviderConfigOutput) diff --git a/service/emr/api.go b/service/emr/api.go index c6cac9f4720..e95be405d2d 100644 --- a/service/emr/api.go +++ b/service/emr/api.go @@ -71,7 +71,8 @@ func (c *EMR) AddInstanceFleetRequest(input *AddInstanceFleetInput) (req *reques // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -338,7 +339,8 @@ func (c *EMR) AddTagsRequest(input *AddTagsInput) (req *request.Request, output // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -508,7 +510,8 @@ func (c *EMR) CreateSecurityConfigurationRequest(input *CreateSecurityConfigurat // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -579,10 +582,6 @@ func (c *EMR) CreateStudioRequest(input *CreateStudioInput) (req *request.Reques // CreateStudio API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// // Creates a new Amazon EMR Studio. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -594,7 +593,8 @@ func (c *EMR) CreateStudioRequest(input *CreateStudioInput) (req *request.Reques // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -666,10 +666,6 @@ func (c *EMR) CreateStudioSessionMappingRequest(input *CreateStudioSessionMappin // CreateStudioSessionMapping API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. 
-// // Maps a user or group to the Amazon EMR Studio specified by StudioId, and // applies a session policy to refine Studio permissions for that user or group. // @@ -766,7 +762,8 @@ func (c *EMR) DeleteSecurityConfigurationRequest(input *DeleteSecurityConfigurat // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -838,10 +835,6 @@ func (c *EMR) DeleteStudioRequest(input *DeleteStudioInput) (req *request.Reques // DeleteStudio API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// // Removes an Amazon EMR Studio from the Studio metadata store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -853,7 +846,8 @@ func (c *EMR) DeleteStudioRequest(input *DeleteStudioInput) (req *request.Reques // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -925,10 +919,6 @@ func (c *EMR) DeleteStudioSessionMappingRequest(input *DeleteStudioSessionMappin // DeleteStudioSessionMapping API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// // Removes a user or group from an Amazon EMR Studio. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1024,7 +1014,8 @@ func (c *EMR) DescribeClusterRequest(input *DescribeClusterInput) (req *request. // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -1298,7 +1289,8 @@ func (c *EMR) DescribeSecurityConfigurationRequest(input *DescribeSecurityConfig // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -1380,7 +1372,8 @@ func (c *EMR) DescribeStepRequest(input *DescribeStepInput) (req *request.Reques // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -1451,10 +1444,6 @@ func (c *EMR) DescribeStudioRequest(input *DescribeStudioInput) (req *request.Re // DescribeStudio API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. 
-// // Returns details for the specified Amazon EMR Studio including ID, Name, VPC, // Studio access URL, and so on. // @@ -1467,7 +1456,8 @@ func (c *EMR) DescribeStudioRequest(input *DescribeStudioInput) (req *request.Re // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -1552,7 +1542,8 @@ func (c *EMR) GetBlockPublicAccessConfigurationRequest(input *GetBlockPublicAcce // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -1697,10 +1688,6 @@ func (c *EMR) GetStudioSessionMappingRequest(input *GetStudioSessionMappingInput // GetStudioSessionMapping API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// // Fetches mapping details for the specified Amazon EMR Studio and identity // (user or group). // @@ -1802,7 +1789,8 @@ func (c *EMR) ListBootstrapActionsRequest(input *ListBootstrapActionsInput) (req // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -1946,7 +1934,8 @@ func (c *EMR) ListClustersRequest(input *ListClustersInput) (req *request.Reques // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -2089,7 +2078,8 @@ func (c *EMR) ListInstanceFleetsRequest(input *ListInstanceFleetsInput) (req *re // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -2229,7 +2219,8 @@ func (c *EMR) ListInstanceGroupsRequest(input *ListInstanceGroupsInput) (req *re // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -2372,7 +2363,8 @@ func (c *EMR) ListInstancesRequest(input *ListInstancesInput) (req *request.Requ // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. 
@@ -2659,7 +2651,8 @@ func (c *EMR) ListSecurityConfigurationsRequest(input *ListSecurityConfiguration // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -2790,7 +2783,7 @@ func (c *EMR) ListStepsRequest(input *ListStepsInput) (req *request.Request, out // // Provides a list of steps for the cluster in reverse order unless you specify // stepIds with the request of filter by StepStates. You can specify a maximum -// of ten stepIDs. +// of 10 stepIDs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2801,7 +2794,8 @@ func (c *EMR) ListStepsRequest(input *ListStepsInput) (req *request.Request, out // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -2930,12 +2924,8 @@ func (c *EMR) ListStudioSessionMappingsRequest(input *ListStudioSessionMappingsI // ListStudioSessionMappings API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// -// Returns a list of all user or group session mappings for the EMR Studio specified -// by StudioId. +// Returns a list of all user or group session mappings for the Amazon EMR Studio +// specified by StudioId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3076,10 +3066,6 @@ func (c *EMR) ListStudiosRequest(input *ListStudiosInput) (req *request.Request, // ListStudios API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// // Returns a list of all Amazon EMR Studios associated with the AWS account. // The list includes details such as ID, Studio Access URL, and creation time // for each Studio. @@ -3093,7 +3079,8 @@ func (c *EMR) ListStudiosRequest(input *ListStudiosInput) (req *request.Request, // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -3317,7 +3304,8 @@ func (c *EMR) ModifyInstanceFleetRequest(input *ModifyInstanceFleetInput) (req * // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. 
@@ -3564,7 +3552,8 @@ func (c *EMR) PutBlockPublicAccessConfigurationRequest(input *PutBlockPublicAcce // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -3880,7 +3869,8 @@ func (c *EMR) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, o // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -4254,7 +4244,8 @@ func (c *EMR) StartNotebookExecutionRequest(input *StartNotebookExecutionInput) // // Returned Error Types: // * InternalServerException -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. // // * InvalidRequestException // This exception occurs when there is something wrong with user input. @@ -4455,6 +4446,91 @@ func (c *EMR) TerminateJobFlowsWithContext(ctx aws.Context, input *TerminateJobF return out, req.Send() } +const opUpdateStudio = "UpdateStudio" + +// UpdateStudioRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStudio operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateStudio for more information on using the UpdateStudio +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateStudioRequest method. +// req, resp := client.UpdateStudioRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/UpdateStudio +func (c *EMR) UpdateStudioRequest(input *UpdateStudioInput) (req *request.Request, output *UpdateStudioOutput) { + op := &request.Operation{ + Name: opUpdateStudio, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStudioInput{} + } + + output = &UpdateStudioOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateStudio API operation for Amazon Elastic MapReduce. +// +// Updates an Amazon EMR Studio configuration, including attributes such as +// name, description, and subnets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation UpdateStudio for usage and error information. 
+// +// Returned Error Types: +// * InternalServerException +// This exception occurs when there is an internal failure in the Amazon EMR +// service. +// +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/UpdateStudio +func (c *EMR) UpdateStudio(input *UpdateStudioInput) (*UpdateStudioOutput, error) { + req, out := c.UpdateStudioRequest(input) + return out, req.Send() +} + +// UpdateStudioWithContext is the same as UpdateStudio with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateStudio for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) UpdateStudioWithContext(ctx aws.Context, input *UpdateStudioInput, opts ...request.Option) (*UpdateStudioOutput, error) { + req, out := c.UpdateStudioRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateStudioSessionMapping = "UpdateStudioSessionMapping" // UpdateStudioSessionMappingRequest generates a "aws/request.Request" representing the @@ -4500,10 +4576,6 @@ func (c *EMR) UpdateStudioSessionMappingRequest(input *UpdateStudioSessionMappin // UpdateStudioSessionMapping API operation for Amazon Elastic MapReduce. // -// -// The Amazon EMR Studio APIs are in preview release for Amazon EMR and are -// subject to change. -// // Updates the session policy attached to the user or group for the specified // Amazon EMR Studio. // @@ -6445,12 +6517,14 @@ type CreateStudioInput struct { // AuthMode is a required field AuthMode *string `type:"string" required:"true" enum:"AuthMode"` - // The default Amazon S3 location to back up EMR Studio Workspaces and notebook - // files. A Studio user can select an alternative Amazon S3 location when creating - // a Workspace. - DefaultS3Location *string `type:"string"` + // The default Amazon S3 location to back up Amazon EMR Studio Workspaces and + // notebook files. A Studio user can select an alternative Amazon S3 location + // when creating a Workspace. + // + // DefaultS3Location is a required field + DefaultS3Location *string `type:"string" required:"true"` - // A detailed description of the Studio. + // A detailed description of the Amazon EMR Studio. Description *string `type:"string"` // The ID of the Amazon EMR Studio Engine security group. The Engine security @@ -6471,21 +6545,21 @@ type CreateStudioInput struct { // ServiceRole is a required field ServiceRole *string `type:"string" required:"true"` - // A list of subnet IDs to associate with the Studio. The subnets must belong - // to the VPC specified by VpcId. Studio users can create a Workspace in any - // of the specified subnets. + // A list of subnet IDs to associate with the Amazon EMR Studio. A Studio can + // have a maximum of 5 subnets. The subnets must belong to the VPC specified + // by VpcId. Studio users can create a Workspace in any of the specified subnets. // // SubnetIds is a required field SubnetIds []*string `type:"list" required:"true"` - // A list of tags to associate with the Studio. 
Tags are user-defined key-value - // pairs that consist of a required key string with a maximum of 128 characters, - // and an optional value string with a maximum of 256 characters. + // A list of tags to associate with the Amazon EMR Studio. Tags are user-defined + // key-value pairs that consist of a required key string with a maximum of 128 + // characters, and an optional value string with a maximum of 256 characters. Tags []*Tag `type:"list"` - // The IAM user role that will be assumed by users and groups logged in to a - // Studio. The permissions attached to this IAM role can be scoped down for - // each user or group using session policies. + // The IAM user role that will be assumed by users and groups logged in to an + // Amazon EMR Studio. The permissions attached to this IAM role can be scoped + // down for each user or group using session policies. // // UserRole is a required field UserRole *string `type:"string" required:"true"` @@ -6520,6 +6594,9 @@ func (s *CreateStudioInput) Validate() error { if s.AuthMode == nil { invalidParams.Add(request.NewErrParamRequired("AuthMode")) } + if s.DefaultS3Location == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultS3Location")) + } if s.EngineSecurityGroupId == nil { invalidParams.Add(request.NewErrParamRequired("EngineSecurityGroupId")) } @@ -6656,13 +6733,14 @@ type CreateStudioSessionMappingInput struct { // must be specified. IdentityId *string `type:"string"` - // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId // must be specified. IdentityName *string `type:"string"` - // Specifies whether the identity to map to the Studio is a user or a group. + // Specifies whether the identity to map to the Amazon EMR Studio is a user + // or a group. // // IdentityType is a required field IdentityType *string `type:"string" required:"true" enum:"IdentityType"` @@ -6867,14 +6945,15 @@ type DeleteStudioSessionMappingInput struct { // must be specified. IdentityId *string `type:"string"` - // The name of the user name or group to remove from the Studio. For more information, - // see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // The name of the user name or group to remove from the Amazon EMR Studio. + // For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId // must be specified. IdentityName *string `type:"string"` - // Specifies whether the identity to delete from the Studio is a user or a group. + // Specifies whether the identity to delete from the Amazon EMR Studio is a + // user or a group. 
// // IdentityType is a required field IdentityType *string `type:"string" required:"true" enum:"IdentityType"` @@ -7929,7 +8008,7 @@ type GetStudioSessionMappingInput struct { IdentityId *string `type:"string"` // The name of the user or group to fetch. For more information, see UserName - // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId // must be specified. @@ -8040,7 +8119,7 @@ type HadoopJarStepConfig struct { MainClass *string `type:"string"` // A list of Java properties that are set when the step runs. You can use these - // properties to pass key value pairs to your main function. + // properties to pass key-value pairs to your main function. Properties []*KeyValue `type:"list"` } @@ -8332,8 +8411,8 @@ type InstanceFleet struct { TargetOnDemandCapacity *int64 `type:"integer"` // The target capacity of Spot units for the instance fleet, which determines - // how many Spot instances to provision. When the instance fleet launches, Amazon - // EMR tries to provision Spot instances as specified by InstanceTypeConfig. + // how many Spot Instances to provision. When the instance fleet launches, Amazon + // EMR tries to provision Spot Instances as specified by InstanceTypeConfig. // Each instance configuration has a specified WeightedCapacity. When a Spot // instance is provisioned, the WeightedCapacity units count toward the target // capacity. Amazon EMR provisions instances until the target capacity is totally @@ -8344,7 +8423,7 @@ type InstanceFleet struct { // to determine the Spot capacity units that have been provisioned for the instance // fleet. // - // If not specified or set to 0, only On-Demand instances are provisioned for + // If not specified or set to 0, only On-Demand Instances are provisioned for // the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity // should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity // and TargetOnDemandCapacity can be specified, and its value must be 1. @@ -8428,8 +8507,8 @@ func (s *InstanceFleet) SetTargetSpotCapacity(v int64) *InstanceFleet { type InstanceFleetConfig struct { _ struct{} `type:"structure"` - // The node type that the instance fleet hosts. Valid values are MASTER,CORE,and - // TASK. + // The node type that the instance fleet hosts. Valid values are MASTER, CORE, + // and TASK. // // InstanceFleetType is a required field InstanceFleetType *string `type:"string" required:"true" enum:"InstanceFleetType"` @@ -9023,9 +9102,9 @@ type InstanceGroupConfig struct { // of a CloudWatch metric. See PutAutoScalingPolicy. AutoScalingPolicy *AutoScalingPolicy `type:"structure"` - // The bid price for each EC2 Spot Instance type as defined by InstanceType. - // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice - // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed + // in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, + // BidPriceAsPercentageOfOnDemandPrice defaults to 100%. 
BidPrice *string `type:"string"` // @@ -9162,9 +9241,9 @@ func (s *InstanceGroupConfig) SetName(v string) *InstanceGroupConfig { type InstanceGroupDetail struct { _ struct{} `type:"structure"` - // The bid price for each EC2 Spot Instance type as defined by InstanceType. - // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice - // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed + // in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, + // BidPriceAsPercentageOfOnDemandPrice defaults to 100%. BidPrice *string `type:"string"` // The date/time the instance group was created. @@ -9926,7 +10005,8 @@ func (s *InternalServerError) RequestID() string { return s.RespMetadata.RequestID } -// This exception occurs when there is an internal failure in the EMR service. +// This exception occurs when there is an internal failure in the Amazon EMR +// service. type InternalServerException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -11594,8 +11674,8 @@ type ListStudioSessionMappingsOutput struct { Marker *string `type:"string"` // A list of session mapping summary objects. Each object includes session mapping - // details such as creation time, identity type (user or group), and Studio - // ID. + // details such as creation time, identity type (user or group), and Amazon + // EMR Studio ID. SessionMappings []*SessionMappingSummary `type:"list"` } @@ -12506,7 +12586,7 @@ type PutAutoScalingPolicyOutput struct { // The automatic scaling policy definition. AutoScalingPolicy *AutoScalingPolicyDescription `type:"structure"` - // The Amazon Resource Name of the cluster. + // The Amazon Resource Name (ARN) of the cluster. ClusterArn *string `min:"20" type:"string"` // Specifies the ID of a cluster. The instance group to which the automatic @@ -12984,7 +13064,7 @@ type RunJobFlowInput struct { // // * "mapr-m7" - launch the cluster using MapR M7 Edition. // - // * "hunk" - launch the cluster with the Hunk Big Data Analtics Platform. + // * "hunk" - launch the cluster with the Hunk Big Data Analytics Platform. // // * "hue"- launch the cluster with Hue installed. // @@ -13301,10 +13381,10 @@ func (s *RunJobFlowInput) SetVisibleToAllUsers(v bool) *RunJobFlowInput { type RunJobFlowOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name of the cluster. + // The Amazon Resource Name (ARN) of the cluster. ClusterArn *string `min:"20" type:"string"` - // An unique identifier for the job flow. + // A unique identifier for the job flow. JobFlowId *string `type:"string"` } @@ -13674,12 +13754,13 @@ type SessionMappingDetail struct { // The globally unique identifier (GUID) of the user or group. IdentityId *string `type:"string"` - // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the AWS SSO Identity Store API Reference. 
IdentityName *string `type:"string"` - // Specifies whether the identity mapped to the Studio is a user or a group. + // Specifies whether the identity mapped to the Amazon EMR Studio is a user + // or a group. IdentityType *string `type:"string" enum:"IdentityType"` // The time the session mapping was last modified. @@ -13757,12 +13838,13 @@ type SessionMappingSummary struct { // Identity Store. IdentityId *string `type:"string"` - // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the AWS SSO Identity Store API Reference. IdentityName *string `type:"string"` - // Specifies whether the identity mapped to the Studio is a user or a group. + // Specifies whether the identity mapped to the Amazon EMR Studio is a user + // or a group. IdentityType *string `type:"string" enum:"IdentityType"` // The Amazon Resource Name (ARN) of the session policy associated with the @@ -14090,7 +14172,7 @@ type SpotProvisioningSpecification struct { // The defined duration for Spot Instances (also known as Spot blocks) in minutes. // When specified, the Spot Instance does not terminate before the defined duration - // expires, and defined duration pricing for Spot instances applies. Valid values + // expires, and defined duration pricing for Spot Instances applies. Valid values // are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as // a Spot Instance receives its instance ID. At the end of the duration, Amazon // EC2 marks the Spot Instance for termination and provides a Spot Instance @@ -14800,8 +14882,8 @@ func (s StopNotebookExecutionOutput) GoString() string { type Studio struct { _ struct{} `type:"structure"` - // Specifies whether the Studio authenticates users using single sign-on (SSO) - // or IAM. + // Specifies whether the Amazon EMR Studio authenticates users using single + // sign-on (SSO) or IAM. AuthMode *string `type:"string" enum:"AuthMode"` // The time the Amazon EMR Studio was created. @@ -14811,7 +14893,7 @@ type Studio struct { // notebook files. DefaultS3Location *string `type:"string"` - // The detailed description of the EMR Studio. + // The detailed description of the Amazon EMR Studio. Description *string `type:"string"` // The ID of the Engine security group associated with the Amazon EMR Studio. @@ -14819,16 +14901,16 @@ type Studio struct { // the Workspace security group. EngineSecurityGroupId *string `type:"string"` - // The name of the EMR Studio. + // The name of the Amazon EMR Studio. Name *string `type:"string"` // The name of the IAM role assumed by the Amazon EMR Studio. ServiceRole *string `type:"string"` - // The Amazon Resource Name (ARN) of the EMR Studio. + // The Amazon Resource Name (ARN) of the Amazon EMR Studio. StudioArn *string `type:"string"` - // The ID of the EMR Studio. + // The ID of the Amazon EMR Studio. StudioId *string `type:"string"` // The list of IDs of the subnets associated with the Amazon EMR Studio. @@ -14843,7 +14925,7 @@ type Studio struct { // The name of the IAM role assumed by users logged in to the Amazon EMR Studio. 
UserRole *string `type:"string"` - // The ID of the VPC associated with the EMR Studio. + // The ID of the VPC associated with the Amazon EMR Studio. VpcId *string `type:"string"` // The ID of the Workspace security group associated with the Amazon EMR Studio. @@ -14961,7 +15043,7 @@ type StudioSummary struct { // The time when the Amazon EMR Studio was created. CreationTime *time.Time `type:"timestamp"` - // The detailed description of the EMR Studio. + // The detailed description of the Amazon EMR Studio. Description *string `type:"string"` // The name of the Amazon EMR Studio. @@ -15024,7 +15106,7 @@ func (s *StudioSummary) SetVpcId(v string) *StudioSummary { return s } -// The list of supported product configurations which allow user-supplied arguments. +// The list of supported product configurations that allow user-supplied arguments. // EMR accepts these arguments and forwards them to the corresponding installation // script as bootstrap action arguments. type SupportedProductConfig struct { @@ -15150,6 +15232,100 @@ func (s TerminateJobFlowsOutput) GoString() string { return s.String() } +type UpdateStudioInput struct { + _ struct{} `type:"structure"` + + // A default Amazon S3 location to back up Workspaces and notebook files for + // the Amazon EMR Studio. A Studio user can select an alternative Amazon S3 + // location when creating a Workspace. + DefaultS3Location *string `type:"string"` + + // A detailed description to assign to the Amazon EMR Studio. + Description *string `type:"string"` + + // A descriptive name for the Amazon EMR Studio. + Name *string `type:"string"` + + // The ID of the Amazon EMR Studio to update. + // + // StudioId is a required field + StudioId *string `type:"string" required:"true"` + + // A list of subnet IDs to associate with the Amazon EMR Studio. The list can + // include new subnet IDs, but must also include all of the subnet IDs previously + // associated with the Studio. The list order does not matter. A Studio can + // have a maximum of 5 subnets. The subnets must belong to the same VPC as the + // Studio. + SubnetIds []*string `type:"list"` +} + +// String returns the string representation +func (s UpdateStudioInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStudioInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateStudioInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStudioInput"} + if s.StudioId == nil { + invalidParams.Add(request.NewErrParamRequired("StudioId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultS3Location sets the DefaultS3Location field's value. +func (s *UpdateStudioInput) SetDefaultS3Location(v string) *UpdateStudioInput { + s.DefaultS3Location = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateStudioInput) SetDescription(v string) *UpdateStudioInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateStudioInput) SetName(v string) *UpdateStudioInput { + s.Name = &v + return s +} + +// SetStudioId sets the StudioId field's value. +func (s *UpdateStudioInput) SetStudioId(v string) *UpdateStudioInput { + s.StudioId = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. 
+func (s *UpdateStudioInput) SetSubnetIds(v []*string) *UpdateStudioInput { + s.SubnetIds = v + return s +} + +type UpdateStudioOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateStudioOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStudioOutput) GoString() string { + return s.String() +} + type UpdateStudioSessionMappingInput struct { _ struct{} `type:"structure"` @@ -15161,7 +15337,7 @@ type UpdateStudioSessionMappingInput struct { IdentityId *string `type:"string"` // The name of the user or group to update. For more information, see UserName - // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId // must be specified. @@ -15178,7 +15354,7 @@ type UpdateStudioSessionMappingInput struct { // SessionPolicyArn is a required field SessionPolicyArn *string `type:"string" required:"true"` - // The ID of the EMR Studio. + // The ID of the Amazon EMR Studio. // // StudioId is a required field StudioId *string `type:"string" required:"true"` diff --git a/service/emr/emriface/interface.go b/service/emr/emriface/interface.go index a5402ea6091..c7a7a580662 100644 --- a/service/emr/emriface/interface.go +++ b/service/emr/emriface/interface.go @@ -270,6 +270,10 @@ type EMRAPI interface { TerminateJobFlowsWithContext(aws.Context, *emr.TerminateJobFlowsInput, ...request.Option) (*emr.TerminateJobFlowsOutput, error) TerminateJobFlowsRequest(*emr.TerminateJobFlowsInput) (*request.Request, *emr.TerminateJobFlowsOutput) + UpdateStudio(*emr.UpdateStudioInput) (*emr.UpdateStudioOutput, error) + UpdateStudioWithContext(aws.Context, *emr.UpdateStudioInput, ...request.Option) (*emr.UpdateStudioOutput, error) + UpdateStudioRequest(*emr.UpdateStudioInput) (*request.Request, *emr.UpdateStudioOutput) + UpdateStudioSessionMapping(*emr.UpdateStudioSessionMappingInput) (*emr.UpdateStudioSessionMappingOutput, error) UpdateStudioSessionMappingWithContext(aws.Context, *emr.UpdateStudioSessionMappingInput, ...request.Option) (*emr.UpdateStudioSessionMappingOutput, error) UpdateStudioSessionMappingRequest(*emr.UpdateStudioSessionMappingInput) (*request.Request, *emr.UpdateStudioSessionMappingOutput) diff --git a/service/emr/errors.go b/service/emr/errors.go index e831146649c..33bdf7db1ee 100644 --- a/service/emr/errors.go +++ b/service/emr/errors.go @@ -18,7 +18,8 @@ const ( // ErrCodeInternalServerException for service response error code // "InternalServerException". // - // This exception occurs when there is an internal failure in the EMR service. + // This exception occurs when there is an internal failure in the Amazon EMR + // service. ErrCodeInternalServerException = "InternalServerException" // ErrCodeInvalidRequestException for service response error code diff --git a/service/s3/api.go b/service/s3/api.go index 89a0a29afff..cc1f3dbf52e 100644 --- a/service/s3/api.go +++ b/service/s3/api.go @@ -2852,9 +2852,12 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. 
// -// Returns the default encryption configuration for an Amazon S3 bucket. For -// information about the Amazon S3 default encryption feature, see Amazon S3 -// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// Returns the default encryption configuration for an Amazon S3 bucket. If +// the bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. +// +// For information about the Amazon S3 default encryption feature, see Amazon +// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). // // To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner @@ -5042,6 +5045,8 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // // * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5302,8 +5307,12 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // // This operation is useful to determine if a bucket exists and you have permission // to access it. The operation returns a 200 OK if the bucket exists and you -// have permission to access it. Otherwise, the operation might return responses -// such as 404 Not Found and 403 Forbidden. +// have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A +// message body is not included, so you cannot determine the exception beyond +// these error codes. // // To use this operation, you must have permissions to perform the s3:ListBucket // action. The bucket owner has this permission by default and can grant this @@ -5394,7 +5403,9 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // A HEAD request has the same options as a GET operation on an object. The // response is identical to the GET response except that there is no response -// body. +// body. Because of this, if the HEAD request generates an error, it returns +// a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve +// the exact exception beyond these error codes. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -5409,11 +5420,14 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). // -// Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. 
+// * Encryption request headers, like x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon +// S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. +// +// * The last modified property in this case is the creation date of the +// object. // // Request headers are limited to 8 KB in size. For more information, see Common // Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). @@ -6482,7 +6496,8 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // the request parameters as selection criteria to return a subset of the objects // in a bucket. A 200 OK response can contain valid or invalid XML. Make sure // to design your application to parse the contents of the response and handle -// it appropriately. +// it appropriately. Objects are returned sorted in an ascending order of the +// respective key names in the list. // // To use this operation, you must have READ access to the bucket. // @@ -7415,7 +7430,8 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You +// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access @@ -7442,6 +7458,22 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // // * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) // +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration +// bucket permission to set the configuration on the bucket. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9751,6 +9783,8 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
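The HeadBucket and HeadObject doc comments added in this patch note that HEAD errors carry no message body, leaving only the generic 403/404 status codes. A minimal, hypothetical sketch of how a caller might act on that (the bucket name is a placeholder; this is not code from the SDK itself):

```go
// Hypothetical caller code, not part of the generated SDK source in this patch.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("examplebucket"), // placeholder bucket name
	})
	if err == nil {
		fmt.Println("bucket exists and is accessible")
		return
	}

	// HEAD responses have no error body, so only the HTTP status code is
	// available; a detailed S3 error message cannot be recovered here.
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		switch reqErr.StatusCode() {
		case 404:
			fmt.Println("bucket does not exist")
		case 403:
			fmt.Println("access to the bucket is forbidden")
		default:
			fmt.Println("unexpected error:", reqErr)
		}
		return
	}
	fmt.Println("error:", err)
}
```

The same type-assertion pattern applies to HeadObject. For non-HEAD operations such as GetBucketEncryption, a full error code is available instead of just the status code, for example the ServerSideEncryptionConfigurationNotFoundError mentioned in the GetBucketEncryption doc update above.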
@@ -12341,6 +12375,10 @@ type Condition struct { // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for // the redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). KeyPrefixEquals *string `type:"string"` } @@ -13083,10 +13121,10 @@ type CopyObjectResult struct { // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. The source and destination ETag - // is identical for a successfully copied object. + // is identical for a successfully copied non-multipart object. ETag *string `type:"string"` - // Returns the date that the object was last modified. + // Creation date of the object. LastModified *time.Time `type:"timestamp"` } @@ -15667,7 +15705,8 @@ type DeleteObjectTaggingInput struct { // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Name of the object key. + // The key that identifies the object in the bucket from which to remove all + // tags. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -16821,6 +16860,10 @@ type ErrorDocument struct { // The object key name to use when a 4XX class error occurs. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Key is a required field Key *string `min:"1" type:"string" required:"true"` } @@ -19565,14 +19608,14 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` - // Specifies the algorithm to use to when encrypting the object (for example, + // Specifies the algorithm to use to when decrypting the object (for example, // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt + // the data. This value is used to decrypt the object when recovering it and + // must match the one used when storing the data. The key must be appropriate + // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -20103,7 +20146,7 @@ type GetObjectOutput struct { // The date and time at which the object is no longer cacheable. 
Expires *string `location:"header" locationName:"Expires" type:"string"` - // Last modified date of the object + // Creation date of the object. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -20570,6 +20613,13 @@ type GetObjectTaggingInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // The versionId of the object for which to get the tagging information. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -20631,6 +20681,12 @@ func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + // SetVersionId sets the VersionId field's value. func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { s.VersionId = &v @@ -21555,7 +21611,7 @@ type HeadObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Last modified date of the object + // Creation date of the object. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -21881,6 +21937,10 @@ type IndexDocument struct { // with the key name images/index.html) The suffix must not be empty and must // not include a slash character. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Suffix is a required field Suffix *string `type:"string" required:"true"` } @@ -22164,6 +22224,10 @@ type IntelligentTieringFilter struct { // An object key name prefix that identifies the subset of objects to which // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // A container of a key value name pair. @@ -22911,6 +22975,10 @@ type LifecycleRule struct { // Prefix identifying one or more objects to which the rule applies. This is // No longer used; use Filter instead. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. 
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -23073,6 +23141,10 @@ type LifecycleRuleFilter struct { And *LifecycleRuleAndOperator `type:"structure"` // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // This tag must exist in the object's tag set in order for the rule to apply. @@ -24579,8 +24651,8 @@ func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { type ListObjectsOutput struct { _ struct{} `type:"structure"` - // All of the keys rolled up in a common prefix count as a single return when - // calculating the number of returns. + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -24891,8 +24963,8 @@ func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { type ListObjectsV2Output struct { _ struct{} `type:"structure"` - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. + // All of the keys (up to 1,000) rolled up into a common prefix count as a single + // return when calculating the number of returns. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -24936,8 +25008,8 @@ type ListObjectsV2Output struct { IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than equals to MaxKeys field. Say you ask for 50 keys, your - // result will include less than equals 50 keys + // always be less than or equals to MaxKeys field. Say you ask for 50 keys, + // your result will include less than equals 50 keys KeyCount *int64 `type:"integer"` // Sets the maximum number of keys returned in the response. By default the @@ -26195,7 +26267,7 @@ type Object struct { // the object. Key *string `min:"1" type:"string"` - // The date the Object was Last Modified + // Creation date of the object. LastModified *time.Time `type:"timestamp"` // The owner of the object @@ -26258,7 +26330,11 @@ func (s *Object) SetStorageClass(v string) *Object { type ObjectIdentifier struct { _ struct{} `type:"structure"` - // Key name of the object to delete. + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -30815,6 +30891,13 @@ type PutObjectTaggingInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Confirms that the requester knows that they will be charged for the request. 
+ // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Container for the TagSet and Tag elements // // Tagging is a required field @@ -30889,6 +30972,12 @@ func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + // SetTagging sets the Tagging field's value. func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { s.Tagging = v @@ -31275,11 +31364,19 @@ type Redirect struct { // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required // if one of the siblings is present. Can be present only if ReplaceKeyWith // is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). ReplaceKeyWith *string `type:"string"` } @@ -31529,6 +31626,10 @@ type ReplicationRule struct { // the rule applies. The maximum prefix length is 1,024 characters. To include // all objects in a bucket, specify an empty string. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -31737,6 +31838,10 @@ type ReplicationRuleFilter struct { // An object key name prefix that identifies the subset of objects to which // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // A container for specifying a tag key and value. @@ -32332,6 +32437,10 @@ type Rule struct { // Object key prefix that identifies one or more objects to which this rule // applies. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. 
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Prefix is a required field Prefix *string `type:"string" required:"true"` diff --git a/service/s3/examples_test.go b/service/s3/examples_test.go index 26abe5f4c25..b06a7eb280b 100644 --- a/service/s3/examples_test.go +++ b/service/s3/examples_test.go @@ -527,11 +527,11 @@ func ExampleS3_DeleteObjectTagging_shared01() { fmt.Println(result) } -// To delete multiple object versions from a versioned bucket +// To delete multiple objects from a versioned bucket // -// The following example deletes objects from a bucket. The request specifies object -// versions. S3 deletes specific object versions and returns the key and versions of -// deleted objects in the response. +// The following example deletes objects from a bucket. The bucket is versioned, and +// the request does not specify the object version to delete. In this case, all versions +// remain in the bucket and S3 adds a delete marker. func ExampleS3_DeleteObjects_shared00() { svc := s3.New(session.New()) input := &s3.DeleteObjectsInput{ @@ -539,12 +539,10 @@ func ExampleS3_DeleteObjects_shared00() { Delete: &s3.Delete{ Objects: []*s3.ObjectIdentifier{ { - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"), + Key: aws.String("objectkey1"), }, { - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"), + Key: aws.String("objectkey2"), }, }, Quiet: aws.Bool(false), @@ -569,11 +567,11 @@ func ExampleS3_DeleteObjects_shared00() { fmt.Println(result) } -// To delete multiple objects from a versioned bucket +// To delete multiple object versions from a versioned bucket // -// The following example deletes objects from a bucket. The bucket is versioned, and -// the request does not specify the object version to delete. In this case, all versions -// remain in the bucket and S3 adds a delete marker. +// The following example deletes objects from a bucket. The request specifies object +// versions. S3 deletes specific object versions and returns the key and versions of +// deleted objects in the response. func ExampleS3_DeleteObjects_shared01() { svc := s3.New(session.New()) input := &s3.DeleteObjectsInput{ @@ -581,10 +579,12 @@ func ExampleS3_DeleteObjects_shared01() { Delete: &s3.Delete{ Objects: []*s3.ObjectIdentifier{ { - Key: aws.String("objectkey1"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"), }, { - Key: aws.String("objectkey2"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"), }, }, Quiet: aws.Bool(false), @@ -934,16 +934,14 @@ func ExampleS3_GetBucketWebsite_shared00() { fmt.Println(result) } -// To retrieve a byte range of an object +// To retrieve an object // -// The following example retrieves an object for an S3 bucket. The request specifies -// the range header to retrieve a specific byte range. +// The following example retrieves an object for an S3 bucket. 
func ExampleS3_GetObject_shared00() { svc := s3.New(session.New()) input := &s3.GetObjectInput{ Bucket: aws.String("examplebucket"), - Key: aws.String("SampleFile.txt"), - Range: aws.String("bytes=0-9"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.GetObject(input) @@ -968,14 +966,16 @@ func ExampleS3_GetObject_shared00() { fmt.Println(result) } -// To retrieve an object +// To retrieve a byte range of an object // -// The following example retrieves an object for an S3 bucket. +// The following example retrieves an object for an S3 bucket. The request specifies +// the range header to retrieve a specific byte range. func ExampleS3_GetObject_shared01() { svc := s3.New(session.New()) input := &s3.GetObjectInput{ Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Key: aws.String("SampleFile.txt"), + Range: aws.String("bytes=0-9"), } result, err := svc.GetObject(input) @@ -1030,14 +1030,16 @@ func ExampleS3_GetObjectAcl_shared00() { fmt.Println(result) } -// To retrieve tag set of an object +// To retrieve tag set of a specific object version // -// The following example retrieves tag set of an object. +// The following example retrieves tag set of an object. The request specifies object +// version. func ExampleS3_GetObjectTagging_shared00() { svc := s3.New(session.New()) input := &s3.GetObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), } result, err := svc.GetObjectTagging(input) @@ -1058,16 +1060,14 @@ func ExampleS3_GetObjectTagging_shared00() { fmt.Println(result) } -// To retrieve tag set of a specific object version +// To retrieve tag set of an object // -// The following example retrieves tag set of an object. The request specifies object -// version. +// The following example retrieves tag set of an object. func ExampleS3_GetObjectTagging_shared01() { svc := s3.New(session.New()) input := &s3.GetObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.GetObjectTagging(input) @@ -1808,19 +1808,20 @@ func ExampleS3_PutBucketWebsite_shared00() { fmt.Println(result) } -// To upload an object and specify server-side encryption and object tags +// To upload object and specify user-defined metadata // -// The following example uploads and object. The request specifies the optional server-side -// encryption option. The request also specifies optional object tags. If the bucket -// is versioning enabled, S3 returns version ID in response. +// The following example creates an object. The request also specifies optional metadata. +// If the bucket is versioning enabled, S3 returns version ID in response. 
func ExampleS3_PutObject_shared00() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - ServerSideEncryption: aws.String("AES256"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + Metadata: map[string]*string{ + "metadata1": aws.String("value1"), + "metadata2": aws.String("value2"), + }, } result, err := svc.PutObject(input) @@ -1841,16 +1842,19 @@ func ExampleS3_PutObject_shared00() { fmt.Println(result) } -// To create an object. +// To upload an object and specify server-side encryption and object tags // -// The following example creates an object. If the bucket is versioning enabled, S3 -// returns version ID in response. +// The following example uploads and object. The request specifies the optional server-side +// encryption option. The request also specifies optional object tags. If the bucket +// is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared01() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + ServerSideEncryption: aws.String("AES256"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1871,17 +1875,17 @@ func ExampleS3_PutObject_shared01() { fmt.Println(result) } -// To upload an object and specify optional tags +// To upload an object // -// The following example uploads an object. The request specifies optional object tags. -// The bucket is versioned, therefore S3 returns version ID of the newly created object. +// The following example uploads an object to a versioning-enabled bucket. The source +// file is specified using Windows file syntax. S3 returns VersionId of the newly created +// object. func ExampleS3_PutObject_shared02() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.PutObject(input) @@ -1902,18 +1906,16 @@ func ExampleS3_PutObject_shared02() { fmt.Println(result) } -// To upload an object (specify optional headers) +// To create an object. // -// The following example uploads an object. The request specifies optional request headers -// to directs S3 to use specific storage class and use server-side encryption. +// The following example creates an object. If the bucket is versioning enabled, S3 +// returns version ID in response. 
func ExampleS3_PutObject_shared03() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - ServerSideEncryption: aws.String("AES256"), - StorageClass: aws.String("STANDARD_IA"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("objectkey"), } result, err := svc.PutObject(input) @@ -1934,20 +1936,18 @@ func ExampleS3_PutObject_shared03() { fmt.Println(result) } -// To upload object and specify user-defined metadata +// To upload an object and specify canned ACL. // -// The following example creates an object. The request also specifies optional metadata. -// If the bucket is versioning enabled, S3 returns version ID in response. +// The following example uploads and object. The request specifies optional canned ACL +// (access control list) to all READ access to authenticated users. If the bucket is +// versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared04() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ + ACL: aws.String("authenticated-read"), Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), Key: aws.String("exampleobject"), - Metadata: map[string]*string{ - "metadata1": aws.String("value1"), - "metadata2": aws.String("value2"), - }, } result, err := svc.PutObject(input) @@ -1968,18 +1968,17 @@ func ExampleS3_PutObject_shared04() { fmt.Println(result) } -// To upload an object and specify canned ACL. +// To upload an object and specify optional tags // -// The following example uploads and object. The request specifies optional canned ACL -// (access control list) to all READ access to authenticated users. If the bucket is -// versioning enabled, S3 returns version ID in response. +// The following example uploads an object. The request specifies optional object tags. +// The bucket is versioned, therefore S3 returns version ID of the newly created object. func ExampleS3_PutObject_shared05() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - ACL: aws.String("authenticated-read"), - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), + Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -2000,17 +1999,18 @@ func ExampleS3_PutObject_shared05() { fmt.Println(result) } -// To upload an object +// To upload an object (specify optional headers) // -// The following example uploads an object to a versioning-enabled bucket. The source -// file is specified using Windows file syntax. S3 returns VersionId of the newly created -// object. +// The following example uploads an object. The request specifies optional request headers +// to directs S3 to use specific storage class and use server-side encryption. 
func ExampleS3_PutObject_shared06() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + ServerSideEncryption: aws.String("AES256"), + StorageClass: aws.String("STANDARD_IA"), } result, err := svc.PutObject(input)