From 64d68069388c74915c2f32559749b9b99756b9d2 Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Thu, 15 Feb 2018 16:17:19 -0800 Subject: [PATCH 01/11] Decouple signature version of S3 addressing style Botocore currently couples `s3v4` signing and path style addressing. Using the older `s3` signature version results in the `auto` style addressing (virtual hosted by default with a fallback to path style if needed). Support for `s3v4` was added in b30a39356 in support for the new `cn-north-1` region. The virtual hosting logic didn't work with the new `.aws.cn` TLD so the code was updated to just use path style addressing when using `s3v4`. This created an inconsistency in the default behavior where if you're using the old signature version we'll use virtual hosted addressing, but if you use s3v4 you get path style by default. Combine this with some regions only supporting s3v4, additional edge cases such as us-gov-west-1, and dual stack and accelerate endpoints, the logic was becoming hard to follow. The new logic is simpler because it's decoupled from region and signature version. We'll try to use virtual hosted addressing by default, and fall back to path style if necessary. --- botocore/utils.py | 26 ++++++------- tests/functional/test_s3.py | 66 ++++++++++++++++++++++++++------ tests/unit/test_s3_addressing.py | 2 +- 3 files changed, 67 insertions(+), 27 deletions(-) diff --git a/botocore/utils.py b/botocore/utils.py index 107dcce7b9..6c8cf4bc7b 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -682,12 +682,7 @@ def fix_s3_host(request, signature_version, region_name, addressing. This allows us to avoid 301 redirects for all bucket names that can be CNAME'd. """ - # By default we do not use virtual hosted style addressing when - # signed with signature version 4. - if signature_version is not botocore.UNSIGNED and \ - 's3v4' in signature_version: - return - elif not _allowed_region(region_name): + if not _allowed_region(region_name): return try: switch_to_virtual_host_style( @@ -904,14 +899,17 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): error = response[1].get('Error', {}) error_code = error.get('Code') - if error_code == '301': - # A raw 301 error might be returned for several reasons, but we - # only want to try to redirect it if it's a HeadObject or - # HeadBucket because all other operations will return - # PermanentRedirect if region is incorrect. 
- if operation.name not in ['HeadObject', 'HeadBucket']: - return - elif error_code != 'PermanentRedirect': + is_raw_redirect = ( + error_code == '301' and + operation.name in ['HeadObject', 'HeadBucket'] + ) + is_wrong_signing_region = ( + error_code == 'AuthorizationHeaderMalformed' and + 'Region' in error + ) + is_permanent_redirect = error_code == 'PermanentRedirect' + if not any([is_raw_redirect, is_wrong_signing_region, + is_permanent_redirect]): return bucket = request_dict['context']['signing']['bucket'] diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index f9ceaacbd8..8885b77452 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -223,7 +223,10 @@ class TestRegionRedirect(BaseS3OperationTest): def setUp(self): super(TestRegionRedirect, self).setUp() self.client = self.session.create_client( - 's3', 'us-west-2', config=Config(signature_version='s3v4')) + 's3', 'us-west-2', config=Config( + signature_version='s3v4', + s3={'addressing_style': 'path'}, + )) self.redirect_response = mock.Mock() self.redirect_response.headers = { @@ -242,6 +245,23 @@ def setUp(self): b' foo.s3.eu-central-1.amazonaws.com' b'') + self.bad_signing_region_response = mock.Mock() + self.bad_signing_region_response.headers = { + 'x-amz-bucket-region': 'eu-central-1' + } + self.bad_signing_region_response.status_code = 400 + self.bad_signing_region_response.content = ( + b'' + b'' + b' AuthorizationHeaderMalformed' + b' the region us-west-2 is wrong; ' + b'expecting eu-central-1' + b' eu-central-1' + b' BD9AA1730D454E39' + b' ' + b'' + ) + self.success_response = mock.Mock() self.success_response.headers = {} self.success_response.status_code = 200 @@ -295,6 +315,29 @@ def test_region_redirect_cache(self): self.assertEqual(calls[1].url, fixed_url) self.assertEqual(calls[2].url, fixed_url) + def test_resign_request_with_region_when_needed(self): + self.http_session_send_mock.side_effect = [ + self.bad_signing_region_response, self.success_response, + ] + + # Create a client with no explicit configuration so we can + # verify the default behavior. + client = self.session.create_client( + 's3', 'us-west-2') + first_response = client.list_objects(Bucket='foo') + self.assertEqual( + first_response['ResponseMetadata']['HTTPStatusCode'], 200) + + self.assertEqual(self.http_session_send_mock.call_count, 2) + calls = [c[0][0] for c in self.http_session_send_mock.call_args_list] + initial_url = ('https://foo.s3.amazonaws.com/' + '?encoding-type=url') + self.assertEqual(calls[0].url, initial_url) + + fixed_url = ('https://foo.s3.amazonaws.com/' + '?encoding-type=url') + self.assertEqual(calls[1].url, fixed_url) + class TestGeneratePresigned(BaseS3OperationTest): def test_generate_unauthed_url(self): @@ -414,31 +457,30 @@ def test_correct_url_used_for_s3(): signature_version='s3', is_secure=False, expected_url='http://bucket.s3.amazonaws.com/key') - # The default behavior for sigv4. DNS compatible buckets still get path - # style addresses. + # Virtual host addressing is independent of signature version. 
yield t.case(region='us-west-2', bucket='bucket', key='key', signature_version='s3v4', expected_url=( - 'https://s3.us-west-2.amazonaws.com/bucket/key')) + 'https://bucket.s3.amazonaws.com/key')) yield t.case(region='us-east-1', bucket='bucket', key='key', signature_version='s3v4', - expected_url='https://s3.amazonaws.com/bucket/key') + expected_url='https://bucket.s3.amazonaws.com/key') yield t.case(region='us-west-1', bucket='bucket', key='key', signature_version='s3v4', expected_url=( - 'https://s3.us-west-1.amazonaws.com/bucket/key')) + 'https://bucket.s3.amazonaws.com/key')) yield t.case(region='us-west-1', bucket='bucket', key='key', signature_version='s3v4', is_secure=False, expected_url=( - 'http://s3.us-west-1.amazonaws.com/bucket/key')) + 'http://bucket.s3.amazonaws.com/key')) # Regions outside of the 'aws' partition. - # We're expecting path style because this is the default with - # 's3v4'. + # These should still default to virtual hosted addressing + # unless explicitly configured otherwise. yield t.case(region='cn-north-1', bucket='bucket', key='key', signature_version='s3v4', expected_url=( - 'https://s3.cn-north-1.amazonaws.com.cn/bucket/key')) + 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) # This isn't actually supported because cn-north-1 is sigv4 only, # but we'll still double check that our internal logic is correct # when building the expected url. @@ -608,11 +650,11 @@ def test_correct_url_used_for_s3(): yield t.case( region='us-east-1', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3v4', - expected_url='https://s3.dualstack.us-east-1.amazonaws.com/bucket/key') + expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') yield t.case( region='us-west-2', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3v4', - expected_url='https://s3.dualstack.us-west-2.amazonaws.com/bucket/key') + expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') # Non DNS compatible buckets use path style for dual stack. yield t.case( region='us-west-2', bucket='bucket.dot', key='key', diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py index 050fe1b517..8001665854 100644 --- a/tests/unit/test_s3_addressing.py +++ b/tests/unit/test_s3_addressing.py @@ -81,7 +81,7 @@ def test_list_objects_unicode_query_string_eu_central_1(self): prepared_request = self.get_prepared_request('list_objects', params) self.assertEqual( prepared_request.url, - ('https://s3.eu-central-1.amazonaws.com/safename' + ('https://safename.s3.amazonaws.com/' '?marker=%C3%A4%C3%B6%C3%BC-01.txt') ) From 39f27e47d129f3b37b58154583e04aff2c989b4f Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Thu, 15 Feb 2018 22:10:17 -0800 Subject: [PATCH 02/11] Remove hardcoded 's3.amazonaws.com' for virtual hosted addressing This removes the hard coded references to 's3.amazonaws.com' when using the virtual hosted addressing mode of S3 and instead uses regionalized endpoints when converting to virtual hosted addressing. For example, given a bucket 'foo' in us-west-2 and a key 'bar', we would convert the URL from `s3.us-west-2.amazonaws.com/foo/bar` to `foo.s3.amazonaws.com/bar`. With this change we'll now convert the URL to `foo.s3.us-west-2.amazonaws.com/bar`. When the initial code for 's3.amazonaws.com' was first added to botocore, it provided a number of benefits: 1. You could avoid a 301 response by using `.s3.amazonaws.com`. 
This is because the DNS would resolve to an endpoint in the correct region, and the older signature version `s3` didn't include a region name as part of its signing process. The end result is that a user did not have to correctly configure a region for an S3 bucket, they'd automatically get to the correct region due to the DNS resolution. 2. The 301 PermanentRedirect responses did not include any structured data about the correct region to use so it wasn't easy to know which region you _should_ be sending the request to. As a result, 301 responses weren't automatically handled and ended up just raising an exception back to the user. Since this code was first introduced there were several things that have changed: 1. The introduction of the `s3v4` signature version, which requires a correct region name as part of its signature. Signing a request with the wrong region results in a 400 response. As a result, it didn't matter if `foo.s3.amazonaws.com` got you to the right region, if the request was _signed_ with the wrong region, you'd get a 400 response. 2. The 301 response (as well as most responses from S3) contain request metadata that tell you which region a bucket is in. This means that it's now possible to automatically handle 301 responses because we know which region to send the request to. 3. The introduction of various partitions outside of the `aws` partition, such as `aws-cn` meant there were other TLDs we needed to handle. The "hack" put in place in botocore was to just disable virtual hosted addressing in certain scenarios. Given all this, it makes sense to no longer hardcode `s3.amazonaws.com` when converting to virtual hosted addressing. There's already a growing number of edge cases where we have to disable this, and most importantly it's not needed anymore. --- botocore/utils.py | 2 +- tests/functional/test_s3.py | 20 ++++++++++---------- tests/integration/test_s3.py | 14 +++++++------- tests/unit/test_s3_addressing.py | 4 ++-- tests/unit/test_utils.py | 6 +++--- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/botocore/utils.py b/botocore/utils.py index 6c8cf4bc7b..0a84020f17 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -672,7 +672,7 @@ def check_dns_name(bucket_name): def fix_s3_host(request, signature_version, region_name, - default_endpoint_url='s3.amazonaws.com', **kwargs): + default_endpoint_url=None, **kwargs): """ This handler looks at S3 requests just before they are signed. 
If there is a bucket name on the path (true for everything except diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index 8885b77452..805dd709c4 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -330,11 +330,11 @@ def test_resign_request_with_region_when_needed(self): self.assertEqual(self.http_session_send_mock.call_count, 2) calls = [c[0][0] for c in self.http_session_send_mock.call_args_list] - initial_url = ('https://foo.s3.amazonaws.com/' + initial_url = ('https://foo.s3.us-west-2.amazonaws.com/' '?encoding-type=url') self.assertEqual(calls[0].url, initial_url) - fixed_url = ('https://foo.s3.amazonaws.com/' + fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/' '?encoding-type=url') self.assertEqual(calls[1].url, fixed_url) @@ -349,7 +349,7 @@ def test_generate_unauthed_url(self): 'Bucket': 'foo', 'Key': 'bar' }) - self.assertEqual(url, 'https://foo.s3.amazonaws.com/bar') + self.assertEqual(url, 'https://foo.s3.us-west-2.amazonaws.com/bar') def test_generate_unauthed_post(self): config = Config(signature_version=botocore.UNSIGNED) @@ -357,7 +357,7 @@ def test_generate_unauthed_post(self): parts = client.generate_presigned_post(Bucket='foo', Key='bar') expected = { 'fields': {'key': 'bar'}, - 'url': 'https://foo.s3.amazonaws.com/' + 'url': 'https://foo.s3.us-west-2.amazonaws.com/' } self.assertEqual(parts, expected) @@ -446,33 +446,33 @@ def test_correct_url_used_for_s3(): # The default behavior for sigv2. DNS compatible buckets yield t.case(region='us-west-2', bucket='bucket', key='key', signature_version='s3', - expected_url='https://bucket.s3.amazonaws.com/key') + expected_url='https://bucket.s3.us-west-2.amazonaws.com/key') yield t.case(region='us-east-1', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.amazonaws.com/key') yield t.case(region='us-west-1', bucket='bucket', key='key', signature_version='s3', - expected_url='https://bucket.s3.amazonaws.com/key') + expected_url='https://bucket.s3.us-west-1.amazonaws.com/key') yield t.case(region='us-west-1', bucket='bucket', key='key', signature_version='s3', is_secure=False, - expected_url='http://bucket.s3.amazonaws.com/key') + expected_url='http://bucket.s3.us-west-1.amazonaws.com/key') # Virtual host addressing is independent of signature version. yield t.case(region='us-west-2', bucket='bucket', key='key', signature_version='s3v4', expected_url=( - 'https://bucket.s3.amazonaws.com/key')) + 'https://bucket.s3.us-west-2.amazonaws.com/key')) yield t.case(region='us-east-1', bucket='bucket', key='key', signature_version='s3v4', expected_url='https://bucket.s3.amazonaws.com/key') yield t.case(region='us-west-1', bucket='bucket', key='key', signature_version='s3v4', expected_url=( - 'https://bucket.s3.amazonaws.com/key')) + 'https://bucket.s3.us-west-1.amazonaws.com/key')) yield t.case(region='us-west-1', bucket='bucket', key='key', signature_version='s3v4', is_secure=False, expected_url=( - 'http://bucket.s3.amazonaws.com/key')) + 'http://bucket.s3.us-west-1.amazonaws.com/key')) # Regions outside of the 'aws' partition. 
# These should still default to virtual hosted addressing diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 88b2bf9445..aa1aeb9552 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -582,7 +582,7 @@ def test_presign_sigv4(self): 'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key}) self.assertTrue( presigned_url.startswith( - 'https://s3.amazonaws.com/%s/%s' % ( + 'https://%s.s3.amazonaws.com/%s' % ( self.bucket_name, self.key)), "Host was suppose to be the us-east-1 endpoint, instead " "got: %s" % presigned_url) @@ -647,7 +647,7 @@ def test_presign_post_sigv4(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://s3.amazonaws.com/%s' % self.bucket_name), + 'https://%s.s3.amazonaws.com/' % self.bucket_name), "Host was suppose to use us-east-1 endpoint, instead " "got: %s" % post_args['url']) @@ -671,8 +671,8 @@ def test_presign_sigv2(self): 'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key}) self.assertTrue( presigned_url.startswith( - 'https://%s.s3.amazonaws.com/%s' % ( - self.bucket_name, self.key)), + 'https://%s.s3.%s.amazonaws.com/%s' % ( + self.bucket_name, self.region, self.key)), "Host was suppose to use DNS style, instead " "got: %s" % presigned_url) # Try to retrieve the object using the presigned url. @@ -687,7 +687,7 @@ def test_presign_sigv4(self): self.assertTrue( presigned_url.startswith( - 'https://s3.us-west-2.amazonaws.com/%s/%s' % ( + 'https://%s.s3.us-west-2.amazonaws.com/%s' % ( self.bucket_name, self.key)), "Host was suppose to be the us-west-2 endpoint, instead " "got: %s" % presigned_url) @@ -715,7 +715,7 @@ def test_presign_post_sigv2(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.amazonaws.com' % self.bucket_name), + 'https://%s.s3.us-west-2.amazonaws.com' % self.bucket_name), "Host was suppose to use DNS style, instead " "got: %s" % post_args['url']) @@ -748,7 +748,7 @@ def test_presign_post_sigv4(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://s3.us-west-2.amazonaws.com/%s' % self.bucket_name), + 'https://%s.s3.us-west-2.amazonaws.com/' % self.bucket_name), "Host was suppose to use DNS style, instead " "got: %s" % post_args['url']) diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py index 8001665854..60279224b8 100644 --- a/tests/unit/test_s3_addressing.py +++ b/tests/unit/test_s3_addressing.py @@ -72,7 +72,7 @@ def test_list_objects_dns_name_non_classic(self): prepared_request = self.get_prepared_request('list_objects', params, force_hmacv1=True) self.assertEqual(prepared_request.url, - 'https://safename.s3.amazonaws.com/') + 'https://safename.s3.us-west-2.amazonaws.com/') def test_list_objects_unicode_query_string_eu_central_1(self): self.region_name = 'eu-central-1' @@ -81,7 +81,7 @@ def test_list_objects_unicode_query_string_eu_central_1(self): prepared_request = self.get_prepared_request('list_objects', params) self.assertEqual( prepared_request.url, - ('https://safename.s3.amazonaws.com/' + ('https://safename.s3.eu-central-1.amazonaws.com/' '?marker=%C3%A4%C3%B6%C3%BC-01.txt') ) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index c1cc1b9c31..a0b9202bf5 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -677,13 +677,13 @@ def test_fix_s3_host_initial(self): request=request, signature_version=signature_version, 
region_name=region_name) self.assertEqual(request.url, - 'https://bucket.s3.amazonaws.com/key.txt') + 'https://bucket.s3-us-west-2.amazonaws.com/key.txt') self.assertEqual(request.auth_path, '/bucket/key.txt') def test_fix_s3_host_only_applied_once(self): request = AWSRequest( method='PUT', headers={}, - url='https://s3-us-west-2.amazonaws.com/bucket/key.txt' + url='https://s3.us-west-2.amazonaws.com/bucket/key.txt' ) region_name = 'us-west-2' signature_version = 's3' @@ -695,7 +695,7 @@ def test_fix_s3_host_only_applied_once(self): request=request, signature_version=signature_version, region_name=region_name) self.assertEqual(request.url, - 'https://bucket.s3.amazonaws.com/key.txt') + 'https://bucket.s3.us-west-2.amazonaws.com/key.txt') # This was a bug previously. We want to make sure that # calling fix_s3_host() again does not alter the auth_path. # Otherwise we'll get signature errors. From 7aa6c94486738b6714dfdd635019be67b08c18bc Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Thu, 15 Feb 2018 22:32:16 -0800 Subject: [PATCH 03/11] Remove special case govcloud handling Similar to cn-north-1 (though implemented differently), using a govcloud region would disable virtual hosted addressing. This removes the whole "allowed regions" concept in the fix_s3_host code. Now the addressing style logic is completely separate of both signature version and region. --- botocore/client.py | 10 ---------- botocore/utils.py | 14 ++------------ tests/functional/test_s3.py | 5 ++--- tests/unit/test_s3_addressing.py | 4 ++-- 4 files changed, 6 insertions(+), 27 deletions(-) diff --git a/botocore/client.py b/botocore/client.py index 18b0243f57..662b7b8e43 100644 --- a/botocore/client.py +++ b/botocore/client.py @@ -189,16 +189,6 @@ def _get_s3_addressing_handler(self, endpoint_url, s3_config, logger.debug("Defaulting to S3 virtual host style addressing with " "path style addressing fallback.") - # For dual stack mode, we need to clear the default endpoint url in - # order to use the existing netloc if the bucket is dns compatible. - # Also, the default_endpoint_url of 's3.amazonaws.com' only works - # if we're in the 'aws' partition. Anywhere else we should - # just use the existing netloc. - if s3_config.get('use_dualstack_endpoint', False) or \ - partition != 'aws': - return functools.partial( - fix_s3_host, default_endpoint_url=None) - # By default, try to use virtual style with path fallback. return fix_s3_host diff --git a/botocore/utils.py b/botocore/utils.py index 0a84020f17..d46a743c0e 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -41,10 +41,6 @@ # Based on rfc2986, section 2.3 SAFE_CHARS = '-._~' LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]') -RESTRICTED_REGIONS = [ - 'us-gov-west-1', - 'fips-us-gov-west-1', -] RETRYABLE_HTTP_ERRORS = (requests.Timeout, requests.ConnectionError) S3_ACCELERATE_WHITELIST = ['dualstack'] @@ -679,11 +675,9 @@ def fix_s3_host(request, signature_version, region_name, ListAllBuckets) it checks to see if that bucket name conforms to the DNS naming conventions. If it does, it alters the request to use ``virtual hosting`` style addressing rather than ``path-style`` - addressing. This allows us to avoid 301 redirects for all - bucket names that can be CNAME'd. + addressing. 
+ """ - if not _allowed_region(region_name): - return try: switch_to_virtual_host_style( request, signature_version, default_endpoint_url) @@ -760,10 +754,6 @@ def _is_get_bucket_location_request(request): return request.url.endswith('?location') -def _allowed_region(region_name): - return region_name not in RESTRICTED_REGIONS - - def instance_cache(func): """Method decorator for caching method calls to a single instance. diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index 805dd709c4..2ef79d5e5a 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -564,15 +564,14 @@ def test_correct_url_used_for_s3(): s3_config=virtual_hosting, expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key') - # Test restricted regions not do virtual host by default yield t.case( region='us-gov-west-1', bucket='bucket', key='key', signature_version='s3', - expected_url='https://s3.us-gov-west-1.amazonaws.com/bucket/key') + expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key') yield t.case( region='fips-us-gov-west-1', bucket='bucket', key='key', signature_version='s3', - expected_url='https://s3-fips-us-gov-west-1.amazonaws.com/bucket/key') + expected_url='https://bucket.s3-fips-us-gov-west-1.amazonaws.com/key') # Test path style addressing. diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py index 60279224b8..b39dddb8b7 100644 --- a/tests/unit/test_s3_addressing.py +++ b/tests/unit/test_s3_addressing.py @@ -91,7 +91,7 @@ def test_list_objects_in_restricted_regions(self): prepared_request = self.get_prepared_request('list_objects', params) # Note how we keep the region specific endpoint here. self.assertEqual(prepared_request.url, - 'https://s3.us-gov-west-1.amazonaws.com/safename') + 'https://safename.s3.us-gov-west-1.amazonaws.com/') def test_list_objects_in_fips(self): self.region_name = 'fips-us-gov-west-1' @@ -100,7 +100,7 @@ def test_list_objects_in_fips(self): # Note how we keep the region specific endpoint here. self.assertEqual( prepared_request.url, - 'https://s3-fips-us-gov-west-1.amazonaws.com/safename') + 'https://safename.s3-fips-us-gov-west-1.amazonaws.com/') def test_list_objects_non_dns_name_non_classic(self): self.region_name = 'us-west-2' From 690f8e022300bcb14af204cc71f9b1b6d54eb591 Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Fri, 16 Feb 2018 12:26:12 -0800 Subject: [PATCH 04/11] Account for 400 response from HeadBucket/HeadObject When you sign a HeadBucket/HeadObject request with the wrong region you'll get a 400 response with no body (expected for HEAD requests). We need to update the special casing for these operations to also catch this case and redirect to the new region appropriately. --- botocore/utils.py | 10 +++++++--- tests/unit/test_utils.py | 20 ++++++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/botocore/utils.py b/botocore/utils.py index d46a743c0e..39febf9486 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -889,8 +889,12 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): error = response[1].get('Error', {}) error_code = error.get('Code') - is_raw_redirect = ( - error_code == '301' and + # We have to account for 400 responses because + # if we sign a Head* request with the wrong region, + # we'll get a 400 Bad Request but we won't get a + # body saying it's an "AuthorizationHeaderMalformed". 
+ is_special_head_object = ( + error_code in ['301', '400'] and operation.name in ['HeadObject', 'HeadBucket'] ) is_wrong_signing_region = ( @@ -898,7 +902,7 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): 'Region' in error ) is_permanent_redirect = error_code == 'PermanentRedirect' - if not any([is_raw_redirect, is_wrong_signing_region, + if not any([is_special_head_object, is_wrong_signing_region, is_permanent_redirect]): return diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index a0b9202bf5..150849fe44 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1393,6 +1393,26 @@ def test_redirects_301(self): request_dict, response, self.operation) self.assertIsNone(redirect_response) + def test_redirects_400_head_bucket(self): + request_dict = {'url': 'https://us-west-2.amazonaws.com/foo', + 'context': {'signing': {'bucket': 'foo'}}} + response = (None, { + 'Error': {'Code': '400', 'Message': 'Bad Request'}, + 'ResponseMetadata': { + 'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'} + } + }) + + self.operation.name = 'HeadObject' + redirect_response = self.redirector.redirect_from_error( + request_dict, response, self.operation) + self.assertEqual(redirect_response, 0) + + self.operation.name = 'ListObjects' + redirect_response = self.redirector.redirect_from_error( + request_dict, response, self.operation) + self.assertIsNone(redirect_response) + def test_does_not_redirect_if_None_response(self): request_dict = {'url': 'https://us-west-2.amazonaws.com/foo', 'context': {'signing': {'bucket': 'foo'}}} From 9f7b999bf15418dd1a68d8fb970f03a501fea9e9 Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Tue, 20 Feb 2018 15:50:25 -0800 Subject: [PATCH 05/11] Use global endpoint for s3 presigning This puts back the original behavior to use the global endpoint for s3 presigning for backwards compatibility. --- botocore/signers.py | 9 ++- botocore/utils.py | 3 + tests/functional/test_s3.py | 116 ++++++++++++++++++++++++++++++++++- tests/integration/test_s3.py | 17 +++-- 4 files changed, 136 insertions(+), 9 deletions(-) diff --git a/botocore/signers.py b/botocore/signers.py index 79ac731557..fd392ce7f1 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -558,7 +558,8 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, expires_in = ExpiresIn http_method = HttpMethod context = { - 'is_presign_request': True + 'is_presign_request': True, + 'partition': self.meta.partition, } request_signer = self._request_signer @@ -586,6 +587,8 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, prepare_request_dict( request_dict, endpoint_url=self.meta.endpoint_url, context=context) + request_dict['context']['partition'] = self.meta.partition + # Generate the presigned url. return request_signer.generate_presigned_url( request_dict=request_dict, expires_in=expires_in, @@ -686,7 +689,9 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, # Prepare the request dict by including the client's endpoint url. prepare_request_dict( - request_dict, endpoint_url=self.meta.endpoint_url) + request_dict, endpoint_url=self.meta.endpoint_url, + context={'is_presign_request': True, 'partition': self.meta.partition}, + ) # Append that the bucket name to the list of conditions. 
conditions.append({'bucket': bucket}) diff --git a/botocore/utils.py b/botocore/utils.py index 39febf9486..eddbae0d68 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -678,6 +678,9 @@ def fix_s3_host(request, signature_version, region_name, addressing. """ + if request.context.get('is_presign_request', False): + if request.context.get('partition', '') == 'aws': + default_endpoint_url = 's3.amazonaws.com' try: switch_to_virtual_host_style( request, signature_version, default_endpoint_url) diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index 2ef79d5e5a..f80df0f351 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -15,6 +15,7 @@ import botocore.session from botocore.config import Config +from botocore.compat import urlsplit from botocore.exceptions import ParamValidationError from botocore import UNSIGNED @@ -349,7 +350,7 @@ def test_generate_unauthed_url(self): 'Bucket': 'foo', 'Key': 'bar' }) - self.assertEqual(url, 'https://foo.s3.us-west-2.amazonaws.com/bar') + self.assertEqual(url, 'https://foo.s3.amazonaws.com/bar') def test_generate_unauthed_post(self): config = Config(signature_version=botocore.UNSIGNED) @@ -357,7 +358,7 @@ def test_generate_unauthed_post(self): parts = client.generate_presigned_post(Bucket='foo', Key='bar') expected = { 'fields': {'key': 'bar'}, - 'url': 'https://foo.s3.us-west-2.amazonaws.com/' + 'url': 'https://foo.s3.amazonaws.com/' } self.assertEqual(parts, expected) @@ -775,6 +776,7 @@ def _verify_expected_endpoint_url(region, bucket, key, s3_config, environ['AWS_ACCESS_KEY_ID'] = 'access_key' environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' environ['AWS_CONFIG_FILE'] = 'no-exist-foo' + environ['AWS_SHARED_CREDENTIALS_FILE'] = 'no-exist-foo' session = create_session() session.config_filename = 'no-exist-foo' config = Config( @@ -790,3 +792,113 @@ def _verify_expected_endpoint_url(region, bucket, key, s3_config, Key=key, Body=b'bar') request_sent = mock_send.call_args[0][0] assert_equal(request_sent.url, expected_url) + + +def _create_s3_client(region, is_secure, endpoint_url, s3_config, + signature_version): + environ = {} + with mock.patch('os.environ', environ): + environ['AWS_ACCESS_KEY_ID'] = 'access_key' + environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' + environ['AWS_CONFIG_FILE'] = 'no-exist-foo' + environ['AWS_SHARED_CREDENTIALS_FILE'] = 'no-exist-foo' + session = create_session() + session.config_filename = 'no-exist-foo' + config = Config( + signature_version=signature_version, + s3=s3_config + ) + s3 = session.create_client('s3', region_name=region, use_ssl=is_secure, + config=config, + endpoint_url=endpoint_url) + return s3 + + +def test_addressing_for_presigned_urls(): + # See TestGeneratePresigned for more detailed test cases + # on presigned URLs. Here's we're just focusing on the + # adddressing mode used for presigned URLs. + # We special case presigned URLs due to backwards + # compatibility. + t = S3AddressingCases(_verify_presigned_url_addressing) + + # us-east-1, or the "global" endpoint. A signature version of + # None means the user doesn't have signature version configured. 
+ yield t.case(region='us-east-1', bucket='bucket', key='key', + signature_version=None, + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-east-1', bucket='bucket', key='key', + signature_version='s3', + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-east-1', bucket='bucket', key='key', + signature_version='s3v4', + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-east-1', bucket='bucket', key='key', + signature_version='s3v4', + s3_config={'addressing_style': 'path'}, + expected_url='https://s3.amazonaws.com/bucket/key') + + # A region that supports both 's3' and 's3v4'. + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version=None, + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version='s3', + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version='s3v4', + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version='s3v4', + s3_config={'addressing_style': 'path'}, + expected_url='https://s3.us-west-2.amazonaws.com/bucket/key') + + # An 's3v4' only region. + yield t.case(region='us-east-2', bucket='bucket', key='key', + signature_version=None, + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-east-2', bucket='bucket', key='key', + signature_version='s3', + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-east-2', bucket='bucket', key='key', + signature_version='s3v4', + expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case(region='us-east-2', bucket='bucket', key='key', + signature_version='s3v4', + s3_config={'addressing_style': 'path'}, + expected_url='https://s3.us-east-2.amazonaws.com/bucket/key') + + # Dualstack endpoints + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version=None, + s3_config={'use_dualstack_endpoint': True}, + expected_url='https://bucket.s3.amazonaws.com/key') + + # Accelerate + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version=None, + s3_config={'use_accelerate_endpoint': True}, + expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + + # A region that we don't know about. + yield t.case(region='us-west-50', bucket='bucket', key='key', + signature_version=None, + expected_url='https://bucket.s3.amazonaws.com/key') + + +def _verify_presigned_url_addressing(region, bucket, key, s3_config, + is_secure=True, + customer_provided_endpoint=None, + expected_url=None, + signature_version=None): + s3 = _create_s3_client(region=region, is_secure=is_secure, + endpoint_url=customer_provided_endpoint, + s3_config=s3_config, + signature_version=signature_version) + url = s3.generate_presigned_url( + 'get_object', {'Bucket': bucket, 'Key': key}) + # We're not trying to verify the params for URL presigning, + # those are tested elsewhere. We just care about the hostname/path. 
+ parts = urlsplit(url) + actual = '%s://%s%s' % parts[:3] + assert_equal(actual, expected_url) diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index aa1aeb9552..2d15b19418 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -671,15 +671,22 @@ def test_presign_sigv2(self): 'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key}) self.assertTrue( presigned_url.startswith( - 'https://%s.s3.%s.amazonaws.com/%s' % ( - self.bucket_name, self.region, self.key)), + 'https://%s.s3.amazonaws.com/%s' % ( + self.bucket_name, self.key)), "Host was suppose to use DNS style, instead " "got: %s" % presigned_url) # Try to retrieve the object using the presigned url. self.assertEqual(requests.get(presigned_url).content, b'foo') def test_presign_sigv4(self): + # For a newly created bucket, you can't use virtualhosted + # addressing and 's3v4' due to the backwards compat behavior + # using '.s3.amazonaws.com' for anything in the AWS partition. + # Instead you either have to use the older 's3' signature version + # of you have to use path style addressing. The latter is being + # done here. self.client_config.signature_version = 's3v4' + self.client_config.s3 = {'addressing_style': 'path'} self.client = self.session.create_client( 's3', config=self.client_config) presigned_url = self.client.generate_presigned_url( @@ -687,7 +694,7 @@ def test_presign_sigv4(self): self.assertTrue( presigned_url.startswith( - 'https://%s.s3.us-west-2.amazonaws.com/%s' % ( + 'https://s3.us-west-2.amazonaws.com/%s/%s' % ( self.bucket_name, self.key)), "Host was suppose to be the us-west-2 endpoint, instead " "got: %s" % presigned_url) @@ -715,7 +722,7 @@ def test_presign_post_sigv2(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.us-west-2.amazonaws.com' % self.bucket_name), + 'https://%s.s3.amazonaws.com' % self.bucket_name), "Host was suppose to use DNS style, instead " "got: %s" % post_args['url']) @@ -748,7 +755,7 @@ def test_presign_post_sigv4(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.us-west-2.amazonaws.com/' % self.bucket_name), + 'https://%s.s3.amazonaws.com/' % self.bucket_name), "Host was suppose to use DNS style, instead " "got: %s" % post_args['url']) From 0f237c018b2f20ef7c49e29507fae84aa8bea9d0 Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Thu, 22 Feb 2018 17:00:58 -0800 Subject: [PATCH 06/11] Use regional endpoint for dualstack endpoints As part of this change I moved out the logic for whether to use a global endpoint for fix_s3_host() into the presigner code, which is where the special casing happens anyways. 
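Roughly, the check that replaces the old special casing is the small helper added to botocore/signers.py in the diff below; it is restated here (same logic, condensed) for clarity — presigned URLs keep the legacy global 's3.amazonaws.com' host only when the client is in the 'aws' partition and dualstack is not enabled:

    def _should_use_global_endpoint(client):
        # Only the 'aws' partition has the legacy global 's3.amazonaws.com'
        # endpoint, and dualstack requests must keep their regional host.
        use_dualstack_endpoint = False
        if client.meta.config.s3 is not None:
            use_dualstack_endpoint = client.meta.config.s3.get(
                'use_dualstack_endpoint', False)
        return (client.meta.partition == 'aws' and
                not use_dualstack_endpoint)

fix_s3_host() then only rewrites the host to 's3.amazonaws.com' when the request context has 'use_global_endpoint' set by the presigner.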
--- botocore/signers.py | 18 ++++++++++++++---- botocore/utils.py | 5 ++--- tests/functional/test_s3.py | 25 +++++++++++++++++++++---- 3 files changed, 37 insertions(+), 11 deletions(-) diff --git a/botocore/signers.py b/botocore/signers.py index fd392ce7f1..be75584966 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -559,7 +559,7 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, http_method = HttpMethod context = { 'is_presign_request': True, - 'partition': self.meta.partition, + 'use_global_endpoint': _should_use_global_endpoint(self), } request_signer = self._request_signer @@ -587,8 +587,6 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, prepare_request_dict( request_dict, endpoint_url=self.meta.endpoint_url, context=context) - request_dict['context']['partition'] = self.meta.partition - # Generate the presigned url. return request_signer.generate_presigned_url( request_dict=request_dict, expires_in=expires_in, @@ -690,7 +688,10 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, # Prepare the request dict by including the client's endpoint url. prepare_request_dict( request_dict, endpoint_url=self.meta.endpoint_url, - context={'is_presign_request': True, 'partition': self.meta.partition}, + context={ + 'is_presign_request': True, + 'use_global_endpoint': _should_use_global_endpoint(self), + }, ) # Append that the bucket name to the list of conditions. @@ -709,3 +710,12 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, return post_presigner.generate_presigned_post( request_dict=request_dict, fields=fields, conditions=conditions, expires_in=expires_in) + + +def _should_use_global_endpoint(client): + use_dualstack_endpoint = False + if client.meta.config.s3 is not None: + use_dualstack_endpoint = client.meta.config.s3.get( + 'use_dualstack_endpoint', False) + return (client.meta.partition == 'aws' and + not use_dualstack_endpoint) diff --git a/botocore/utils.py b/botocore/utils.py index eddbae0d68..56ab9c9433 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -678,9 +678,8 @@ def fix_s3_host(request, signature_version, region_name, addressing. 
""" - if request.context.get('is_presign_request', False): - if request.context.get('partition', '') == 'aws': - default_endpoint_url = 's3.amazonaws.com' + if request.context.get('use_global_endpoint', False): + default_endpoint_url = 's3.amazonaws.com' try: switch_to_virtual_host_style( request, signature_version, default_endpoint_url) diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index f80df0f351..061b5ef20f 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -869,10 +869,21 @@ def test_addressing_for_presigned_urls(): expected_url='https://s3.us-east-2.amazonaws.com/bucket/key') # Dualstack endpoints - yield t.case(region='us-west-2', bucket='bucket', key='key', - signature_version=None, - s3_config={'use_dualstack_endpoint': True}, - expected_url='https://bucket.s3.amazonaws.com/key') + yield t.case( + region='us-west-2', bucket='bucket', key='key', + signature_version=None, + s3_config={'use_dualstack_endpoint': True}, + expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + yield t.case( + region='us-west-2', bucket='bucket', key='key', + signature_version='s3', + s3_config={'use_dualstack_endpoint': True}, + expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + yield t.case( + region='us-west-2', bucket='bucket', key='key', + signature_version='s3v4', + s3_config={'use_dualstack_endpoint': True}, + expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') # Accelerate yield t.case(region='us-west-2', bucket='bucket', key='key', @@ -885,6 +896,12 @@ def test_addressing_for_presigned_urls(): signature_version=None, expected_url='https://bucket.s3.amazonaws.com/key') + # Customer provided URL results in us leaving the host untouched. + yield t.case(region='us-west-2', bucket='bucket', key='key', + signature_version=None, + customer_provided_endpoint='https://foo.com/', + expected_url='https://foo.com/bucket/key') + def _verify_presigned_url_addressing(region, bucket, key, s3_config, is_secure=True, From d13c6657307264e04976c4128568b41504511ea9 Mon Sep 17 00:00:00 2001 From: James Saryerwinnie Date: Fri, 23 Feb 2018 13:31:59 -0800 Subject: [PATCH 07/11] Add changelog entry for #1387 --- .changes/next-release/feature-s3-58965.json | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changes/next-release/feature-s3-58965.json diff --git a/.changes/next-release/feature-s3-58965.json b/.changes/next-release/feature-s3-58965.json new file mode 100644 index 0000000000..007cb711e1 --- /dev/null +++ b/.changes/next-release/feature-s3-58965.json @@ -0,0 +1,5 @@ +{ + "type": "feature", + "category": "``s3``", + "description": "Default to virtual hosted addressing regardless of signature version (boto/botocore`#1387 `__)" +} From 23bc8a56094e40c5527724d5bd88e1364dbc724d Mon Sep 17 00:00:00 2001 From: stephenwithph Date: Sat, 3 Feb 2018 12:56:17 -0800 Subject: [PATCH 08/11] support setting response metadata when adding client error to stub --- botocore/stub.py | 11 ++++++++++- tests/unit/test_stub.py | 19 ++++++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/botocore/stub.py b/botocore/stub.py index 68f5a16756..c6c6a642f9 100644 --- a/botocore/stub.py +++ b/botocore/stub.py @@ -250,7 +250,8 @@ def _add_response(self, method, service_response, expected_params): def add_client_error(self, method, service_error_code='', service_message='', http_status_code=400, - service_error_meta=None, expected_params=None): + service_error_meta=None, expected_params=None, + 
response_meta=None): """ Adds a ``ClientError`` to the response queue. @@ -278,6 +279,11 @@ def add_client_error(self, method, service_error_code='', any of the parameters differ a ``StubResponseError`` is thrown. You can use stub.ANY to indicate a particular parameter to ignore in validation. + + :param response_meta: Additional keys to be added to the + response's ResponseMetadata + :type response_meta: dict + """ http_response = Response() http_response.status_code = http_status_code @@ -296,6 +302,9 @@ def add_client_error(self, method, service_error_code='', if service_error_meta is not None: parsed_response['Error'].update(service_error_meta) + if response_meta is not None: + parsed_response['ResponseMetadata'].update(response_meta) + operation_name = self.client.meta.method_to_api_mapping.get(method) # Note that we do not allow for expected_params while # adding errors into the queue yet. diff --git a/tests/unit/test_stub.py b/tests/unit/test_stub.py index 1d3a30efce..fbc4063340 100644 --- a/tests/unit/test_stub.py +++ b/tests/unit/test_stub.py @@ -159,7 +159,7 @@ def test_get_client_error_response(self): self.assertEqual(response[1]['Error']['Message'], service_message) self.assertEqual(response[1]['Error']['Code'], error_code) - def test_get_client_error_with_extra_keys(self): + def test_get_client_error_with_extra_error_meta(self): error_code = "foo" error_message = "bar" error_meta = { @@ -175,6 +175,23 @@ def test_get_client_error_with_extra_keys(self): self.assertIn('Endpoint', error) self.assertEqual(error['Endpoint'], "https://foo.bar.baz") + def test_get_client_error_with_extra_response_meta(self): + error_code = "foo" + error_message = "bar" + stub_response_meta = { + "RequestId": "79104EXAMPLEB723", + } + self.stubber.add_client_error( + 'foo', error_code, error_message, + http_status_code=301, + response_meta=stub_response_meta) + with self.stubber: + response = self.emit_get_response_event() + actual_response_meta = response[1]['ResponseMetadata'] + self.assertIn('RequestId', actual_response_meta) + self.assertEqual(actual_response_meta['RequestId'], "79104EXAMPLEB723") + + def test_get_response_errors_with_no_stubs(self): self.stubber.activate() with self.assertRaises(UnStubbedResponseError): From df5d008aaee19578856c49a12adb16253f1f6150 Mon Sep 17 00:00:00 2001 From: JordonPhillips Date: Mon, 26 Feb 2018 09:31:14 -0800 Subject: [PATCH 09/11] Added changelog for metadata stubbing --- .changes/next-release/enhancement-Stubber-50459.json | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changes/next-release/enhancement-Stubber-50459.json diff --git a/.changes/next-release/enhancement-Stubber-50459.json b/.changes/next-release/enhancement-Stubber-50459.json new file mode 100644 index 0000000000..1e3e9c39b1 --- /dev/null +++ b/.changes/next-release/enhancement-Stubber-50459.json @@ -0,0 +1,5 @@ +{ + "type": "enhancement", + "category": "Stubber", + "description": "Added the ability to add items to response metadata with the stubber." 
+} From 1a08e580b7c688764432bafef7a7eec48f10ed07 Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 26 Feb 2018 15:50:13 -0800 Subject: [PATCH 10/11] Update to latest models --- .changes/next-release/api-change-route53-77453.json | 5 +++++ .changes/next-release/api-change-sts-692.json | 5 +++++ botocore/data/route53/2013-04-01/service-2.json | 1 + botocore/data/sts/2011-06-15/service-2.json | 7 ++++--- 4 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 .changes/next-release/api-change-route53-77453.json create mode 100644 .changes/next-release/api-change-sts-692.json diff --git a/.changes/next-release/api-change-route53-77453.json b/.changes/next-release/api-change-route53-77453.json new file mode 100644 index 0000000000..65a16bf65d --- /dev/null +++ b/.changes/next-release/api-change-route53-77453.json @@ -0,0 +1,5 @@ +{ + "category": "``route53``", + "type": "api-change", + "description": "Update route53 client to latest version" +} diff --git a/.changes/next-release/api-change-sts-692.json b/.changes/next-release/api-change-sts-692.json new file mode 100644 index 0000000000..b05c3798f4 --- /dev/null +++ b/.changes/next-release/api-change-sts-692.json @@ -0,0 +1,5 @@ +{ + "category": "``sts``", + "type": "api-change", + "description": "Update sts client to latest version" +} diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index ac8172b413..030d45ddf0 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -4241,6 +4241,7 @@ "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", + "ap-northeast-3", "sa-east-1", "cn-north-1", "cn-northwest-1", diff --git a/botocore/data/sts/2011-06-15/service-2.json b/botocore/data/sts/2011-06-15/service-2.json index be79b355da..7f433bb82c 100644 --- a/botocore/data/sts/2011-06-15/service-2.json +++ b/botocore/data/sts/2011-06-15/service-2.json @@ -7,6 +7,7 @@ "protocol":"query", "serviceAbbreviation":"AWS STS", "serviceFullName":"AWS Security Token Service", + "serviceId":"STS", "signatureVersion":"v4", "uid":"sts-2011-06-15", "xmlNamespace":"https://sts.amazonaws.com/doc/2011-06-15/" @@ -28,7 +29,7 @@ {"shape":"PackedPolicyTooLargeException"}, {"shape":"RegionDisabledException"} ], - "documentation":"

Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) that you can use to access AWS resources that you might not normally have access to. Typically, you use AssumeRole for cross-account access or federation. For a comparison of AssumeRole with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

Important: You cannot call AssumeRole by using AWS root account credentials; access is denied. You must use credentials for an IAM user or an IAM role to call AssumeRole.

For cross-account access, imagine that you own multiple accounts and need to access resources in each account. You could create long-term credentials in each account to access those resources. However, managing all those credentials and remembering which one can access which account can be time consuming. Instead, you can create one set of long-term credentials in one account and then use temporary security credentials to access all the other accounts by assuming roles in those accounts. For more information about roles, see IAM Roles (Delegation and Federation) in the IAM User Guide.

For federation, you can, for example, grant single sign-on access to the AWS Management Console. If you already have an identity and authentication system in your corporate network, you don't have to recreate user identities in AWS in order to grant those user identities access to AWS. Instead, after a user has been authenticated, you call AssumeRole (and specify the role with the appropriate permissions) to get temporary security credentials for that user. With those temporary security credentials, you construct a sign-in URL that users can use to access the console. For more information, see Common Scenarios for Temporary Credentials in the IAM User Guide.

The temporary security credentials are valid for the duration that you specified when calling AssumeRole, which can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

The temporary security credentials created by AssumeRole can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

To assume a role, your AWS account must be trusted by the role. The trust relationship is defined in the role's trust policy when the role is created. That trust policy states which accounts are allowed to delegate access to this account's role.

The user who wants to access the role must also have permissions delegated from the role's administrator. If the user is in a different account than the role, then the user's administrator must attach a policy that allows the user to call AssumeRole on the ARN of the role in the other account. If the user is in the same account as the role, then you can either attach a policy to the user (identical to the previous different account user), or you can add the user as a principal directly in the role's trust policy

Using MFA with AssumeRole

You can optionally include multi-factor authentication (MFA) information when you call AssumeRole. This is useful for cross-account scenarios in which you want to make sure that the user who is assuming the role has been authenticated using an AWS MFA device. In that scenario, the trust policy of the role being assumed includes a condition that tests for MFA authentication; if the caller does not include valid MFA information, the request to assume the role is denied. The condition in a trust policy that tests for MFA authentication might look like the following example.

\"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": true}}

For more information, see Configuring MFA-Protected API Access in the IAM User Guide guide.

To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode parameters. The SerialNumber value identifies the user's hardware or virtual MFA device. The TokenCode is the time-based one-time password (TOTP) that the MFA devices produces.

" + "documentation":"

Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) that you can use to access AWS resources that you might not normally have access to. Typically, you use AssumeRole for cross-account access or federation. For a comparison of AssumeRole with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

Important: You cannot call AssumeRole by using AWS root account credentials; access is denied. You must use credentials for an IAM user or an IAM role to call AssumeRole.

For cross-account access, imagine that you own multiple accounts and need to access resources in each account. You could create long-term credentials in each account to access those resources. However, managing all those credentials and remembering which one can access which account can be time consuming. Instead, you can create one set of long-term credentials in one account and then use temporary security credentials to access all the other accounts by assuming roles in those accounts. For more information about roles, see IAM Roles (Delegation and Federation) in the IAM User Guide.

For federation, you can, for example, grant single sign-on access to the AWS Management Console. If you already have an identity and authentication system in your corporate network, you don't have to recreate user identities in AWS in order to grant those user identities access to AWS. Instead, after a user has been authenticated, you call AssumeRole (and specify the role with the appropriate permissions) to get temporary security credentials for that user. With those temporary security credentials, you construct a sign-in URL that users can use to access the console. For more information, see Common Scenarios for Temporary Credentials in the IAM User Guide.

The temporary security credentials are valid for the duration that you specified when calling AssumeRole, which can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

The temporary security credentials created by AssumeRole can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

To assume a role, your AWS account must be trusted by the role. The trust relationship is defined in the role's trust policy when the role is created. That trust policy states which accounts are allowed to delegate access to this account's role.

The user who wants to access the role must also have permissions delegated from the role's administrator. If the user is in a different account than the role, then the user's administrator must attach a policy that allows the user to call AssumeRole on the ARN of the role in the other account. If the user is in the same account as the role, then you can either attach a policy to the user (identical to the previous different account user), or you can add the user as a principal directly in the role's trust policy. In this case, the trust policy acts as the only resource-based policy in IAM, and users in the same account as the role do not need explicit permission to assume the role. For more information about trust policies and resource-based policies, see IAM Policies in the IAM User Guide.

Using MFA with AssumeRole

You can optionally include multi-factor authentication (MFA) information when you call AssumeRole. This is useful for cross-account scenarios in which you want to make sure that the user who is assuming the role has been authenticated using an AWS MFA device. In that scenario, the trust policy of the role being assumed includes a condition that tests for MFA authentication; if the caller does not include valid MFA information, the request to assume the role is denied. The condition in a trust policy that tests for MFA authentication might look like the following example.

\"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": true}}

For more information, see Configuring MFA-Protected API Access in the IAM User Guide.

To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode parameters. The SerialNumber value identifies the user's hardware or virtual MFA device. The TokenCode is the time-based one-time password (TOTP) that the MFA device produces.
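
As a brief sketch (the role and MFA device ARNs are placeholders), the call looks like this in boto3:

    import boto3

    response = boto3.client('sts').assume_role(
        RoleArn='arn:aws:iam::123456789012:role/AdminRole',   # hypothetical role
        RoleSessionName='mfa-protected-session',
        SerialNumber='arn:aws:iam::111122223333:mfa/alice',   # the caller's MFA device
        TokenCode='123456',                                   # current code from that device
    )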

" }, "AssumeRoleWithSAML":{ "name":"AssumeRoleWithSAML", @@ -71,7 +72,7 @@ {"shape":"ExpiredTokenException"}, {"shape":"RegionDisabledException"} ], - "documentation":"

Returns a set of temporary security credentials for users who have been authenticated in a mobile or web application with a web identity provider, such as Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity provider.

For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the AWS SDK for iOS and the AWS SDK for Android to uniquely identify a user and supply the user with a consistent identity throughout the lifetime of an application.

To learn more about Amazon Cognito, see Amazon Cognito Overview in the AWS SDK for Android Developer Guide and Amazon Cognito Overview in the AWS SDK for iOS Developer Guide.

Calling AssumeRoleWithWebIdentity does not require the use of AWS security credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including long-term AWS credentials in the application, and without deploying server-based proxy services that use long-term AWS credentials. Instead, the identity of the caller is validated by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

The temporary security credentials returned by this API consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to AWS service APIs.

The credentials are valid for the duration that you specified when calling AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

The temporary security credentials created by AssumeRoleWithWebIdentity can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

Before your application can call AssumeRoleWithWebIdentity, you must have an identity token from a supported identity provider and create a role that the application can assume. The role that your application assumes must trust the identity provider that is associated with the identity token. In other words, the identity provider must be specified in the role's trust policy.

Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail logs. The entry includes the Subject of the provided Web Identity Token. We recommend that you avoid using any personally identifiable information (PII) in this field. For example, you could instead use a GUID or a pairwise identifier, as suggested in the OIDC specification.

For more information about how to use web identity federation and the AssumeRoleWithWebIdentity API, see the following resources:

" + "documentation":"

Returns a set of temporary security credentials for users who have been authenticated in a mobile or web application with a web identity provider, such as Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity provider.

For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the AWS SDK for iOS and the AWS SDK for Android to uniquely identify a user and supply the user with a consistent identity throughout the lifetime of an application.

To learn more about Amazon Cognito, see Amazon Cognito Overview in the AWS SDK for Android Developer Guide and Amazon Cognito Overview in the AWS SDK for iOS Developer Guide.

Calling AssumeRoleWithWebIdentity does not require the use of AWS security credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including long-term AWS credentials in the application, and without deploying server-based proxy services that use long-term AWS credentials. Instead, the identity of the caller is validated by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.
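
As an illustrative sketch (the role ARN is a placeholder, and the token would come from the identity provider at run time), the call can be made from a client that has no AWS credentials configured; depending on the SDK version you may need to disable request signing explicitly, as shown here:

    import boto3
    from botocore import UNSIGNED
    from botocore.config import Config

    web_identity_token = '<OIDC token obtained from the identity provider>'

    # No long-term AWS credentials are used; the web identity token itself
    # authenticates the request.
    sts = boto3.client('sts', config=Config(signature_version=UNSIGNED))
    response = sts.assume_role_with_web_identity(
        RoleArn='arn:aws:iam::123456789012:role/WebAppRole',  # hypothetical role
        RoleSessionName='app-user-session',
        WebIdentityToken=web_identity_token,
    )
    creds = response['Credentials']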

The temporary security credentials returned by this API consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to AWS service APIs.

The credentials are valid for the duration that you specified when calling AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

The temporary security credentials created by AssumeRoleWithWebIdentity can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

Before your application can call AssumeRoleWithWebIdentity, you must have an identity token from a supported identity provider and create a role that the application can assume. The role that your application assumes must trust the identity provider that is associated with the identity token. In other words, the identity provider must be specified in the role's trust policy.
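
For example, a trust policy for a role assumed with Google-issued tokens might look like the following (the client ID is a placeholder, and the exact condition key depends on the provider):

    {
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"Federated": "accounts.google.com"},
        "Action": "sts:AssumeRoleWithWebIdentity",
        "Condition": {
          "StringEquals": {"accounts.google.com:aud": "example-app-client-id.apps.googleusercontent.com"}
        }
      }]
    }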

Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail logs. The entry includes the Subject of the provided Web Identity Token. We recommend that you avoid using any personally identifiable information (PII) in this field. For example, you could instead use a GUID or a pairwise identifier, as suggested in the OIDC specification.

For more information about how to use web identity federation and the AssumeRoleWithWebIdentity API, see the following resources:

" }, "DecodeAuthorizationMessage":{ "name":"DecodeAuthorizationMessage", @@ -604,7 +605,7 @@ }, "SAMLAssertionType":{ "type":"string", - "max":50000, + "max":100000, "min":4 }, "Subject":{"type":"string"}, From 73983476b76981b7c16003da4d005f01c274c3ff Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 26 Feb 2018 15:52:58 -0800 Subject: [PATCH 11/11] Bumping version to 1.9.0 --- .changes/1.9.0.json | 22 +++++++++++++++++++ .../api-change-route53-77453.json | 5 ----- .changes/next-release/api-change-sts-692.json | 5 ----- .../enhancement-Stubber-50459.json | 5 ----- .changes/next-release/feature-s3-58965.json | 5 ----- CHANGELOG.rst | 9 ++++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 4 ++-- 8 files changed, 34 insertions(+), 23 deletions(-) create mode 100644 .changes/1.9.0.json delete mode 100644 .changes/next-release/api-change-route53-77453.json delete mode 100644 .changes/next-release/api-change-sts-692.json delete mode 100644 .changes/next-release/enhancement-Stubber-50459.json delete mode 100644 .changes/next-release/feature-s3-58965.json diff --git a/.changes/1.9.0.json b/.changes/1.9.0.json new file mode 100644 index 0000000000..8e48d8861d --- /dev/null +++ b/.changes/1.9.0.json @@ -0,0 +1,22 @@ +[ + { + "category": "Stubber", + "description": "Added the ability to add items to response metadata with the stubber.", + "type": "enhancement" + }, + { + "category": "``sts``", + "description": "Update sts client to latest version", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Update route53 client to latest version", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Default to virtual hosted addressing regardless of signature version (boto/botocore`#1387 `__)", + "type": "feature" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-route53-77453.json b/.changes/next-release/api-change-route53-77453.json deleted file mode 100644 index 65a16bf65d..0000000000 --- a/.changes/next-release/api-change-route53-77453.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "category": "``route53``", - "type": "api-change", - "description": "Update route53 client to latest version" -} diff --git a/.changes/next-release/api-change-sts-692.json b/.changes/next-release/api-change-sts-692.json deleted file mode 100644 index b05c3798f4..0000000000 --- a/.changes/next-release/api-change-sts-692.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "category": "``sts``", - "type": "api-change", - "description": "Update sts client to latest version" -} diff --git a/.changes/next-release/enhancement-Stubber-50459.json b/.changes/next-release/enhancement-Stubber-50459.json deleted file mode 100644 index 1e3e9c39b1..0000000000 --- a/.changes/next-release/enhancement-Stubber-50459.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "enhancement", - "category": "Stubber", - "description": "Added the ability to add items to response metadata with the stubber." 
-} diff --git a/.changes/next-release/feature-s3-58965.json b/.changes/next-release/feature-s3-58965.json deleted file mode 100644 index 007cb711e1..0000000000 --- a/.changes/next-release/feature-s3-58965.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "feature", - "category": "``s3``", - "description": "Default to virtual hosted addressing regardless of signature version (boto/botocore`#1387 `__)" -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4be65edc2f..e147cf3b28 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,15 @@ CHANGELOG ========= +1.9.0 +===== + +* enhancement:Stubber: Added the ability to add items to response metadata with the stubber. +* api-change:``sts``: Update sts client to latest version +* api-change:``route53``: Update route53 client to latest version +* feature:``s3``: Default to virtual hosted addressing regardless of signature version (boto/botocore`#1387 `__) + + 1.8.50 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index 8e38f00fcf..caf2e33046 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '1.8.50' +__version__ = '1.9.0' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index e85f82ab9d..07c3ea2427 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ # built documents. # # The short X.Y version. -version = '1.8.' +version = '1.9' # The full version, including alpha/beta/rc tags. -release = '1.8.50' +release = '1.9.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.