Commit

Fix adjust issues
YakDriver committed Aug 1, 2019
1 parent 6f9ac1b commit c69f642
Showing 3 changed files with 84 additions and 97 deletions.
109 changes: 38 additions & 71 deletions aws/data_source_aws_s3_bucket_objects.go
@@ -2,11 +2,10 @@ package aws

import (
"fmt"
"log"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)

@@ -46,10 +45,6 @@ func dataSourceAwsS3BucketObjects() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"keys": {
Type: schema.TypeList,
Computed: true,
@@ -75,25 +70,7 @@ func dataSourceAwsS3BucketObjectsRead(d *schema.ResourceData, meta interface{})
bucket := d.Get("bucket").(string)
prefix := d.Get("prefix").(string)

input := &s3.HeadBucketInput{
Bucket: aws.String(bucket),
}

log.Printf("[DEBUG] Reading S3 bucket: %s", input)
_, err := conn.HeadBucket(input)

if err != nil {
return fmt.Errorf("Failed listing S3 bucket object keys: %s Bucket: %q", err, bucket)
}

d.SetId(fmt.Sprintf("%s_%s", bucket, prefix))

arn := arn.ARN{
Partition: meta.(*AWSClient).partition,
Service: "s3",
Resource: bucket,
}.String()
d.Set("arn", arn)
d.SetId(resource.UniqueId())

listInput := s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
@@ -114,14 +91,9 @@ func dataSourceAwsS3BucketObjectsRead(d *schema.ResourceData, meta interface{})
// "listInput.MaxKeys" refers to max keys returned in a single request
// (i.e., page size), not the total number of keys returned if you page
// through the results. "maxKeys" does refer to total keys returned.
maxKeys := -1
if max, ok := d.GetOk("max_keys"); ok {
maxKeys = max.(int)
if maxKeys > keyRequestPageSize {
listInput.MaxKeys = aws.Int64(int64(keyRequestPageSize))
} else {
listInput.MaxKeys = aws.Int64(int64(maxKeys))
}
maxKeys := int64(d.Get("max_keys").(int))
if maxKeys <= keyRequestPageSize {
listInput.MaxKeys = aws.Int64(maxKeys)
}

if s, ok := d.GetOk("start_after"); ok {
@@ -132,52 +104,47 @@ func dataSourceAwsS3BucketObjectsRead(d *schema.ResourceData, meta interface{})
listInput.FetchOwner = aws.Bool(b.(bool))
}

keys, prefixes, owners, err := listS3Objects(conn, listInput, maxKeys)
if err != nil {
return err
}
d.Set("keys", keys)
d.Set("common_prefixes", prefixes)
d.Set("owners", owners)

return nil
}
var commonPrefixes []string
var keys []string
var owners []string

func listS3Objects(conn *s3.S3, input s3.ListObjectsV2Input, maxKeys int) ([]string, []string, []string, error) {
var objectList []string
var commonPrefixList []string
var ownerList []string
var continueToken *string
for {
//page through keys
input.ContinuationToken = continueToken

log.Printf("[DEBUG] Requesting page of S3 bucket (%s) object keys", *input.Bucket)
listOutput, err := conn.ListObjectsV2(&input)
if err != nil {
return nil, nil, nil, fmt.Errorf("Failed listing S3 bucket object keys: %s Bucket: %q", err, *input.Bucket)
err := conn.ListObjectsV2Pages(&listInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, commonPrefix := range page.CommonPrefixes {
commonPrefixes = append(commonPrefixes, aws.StringValue(commonPrefix.Prefix))
}

for _, content := range listOutput.Contents {
objectList = append(objectList, *content.Key)
if input.FetchOwner != nil && *input.FetchOwner {
ownerList = append(ownerList, *content.Owner.ID)
}
if maxKeys > -1 && len(objectList) >= maxKeys {
break
for _, object := range page.Contents {
keys = append(keys, aws.StringValue(object.Key))

if object.Owner != nil {
owners = append(owners, aws.StringValue(object.Owner.ID))
}
}

for _, commonPrefix := range listOutput.CommonPrefixes {
commonPrefixList = append(commonPrefixList, *commonPrefix.Prefix)
}
maxKeys = maxKeys - aws.Int64Value(page.KeyCount)

// stop requesting if no more results OR all wanted keys done
if !*listOutput.IsTruncated || (maxKeys > -1 && len(objectList) >= maxKeys) {
break
if maxKeys <= keyRequestPageSize {
listInput.MaxKeys = aws.Int64(maxKeys)
}
continueToken = listOutput.NextContinuationToken

return !lastPage
})

if err != nil {
return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucket, err)
}

return objectList, commonPrefixList, ownerList, nil
if err := d.Set("common_prefixes", commonPrefixes); err != nil {
return fmt.Errorf("error setting common_prefixes: %s", err)
}

if err := d.Set("keys", keys); err != nil {
return fmt.Errorf("error setting keys: %s", err)
}

if err := d.Set("owners", owners); err != nil {
return fmt.Errorf("error setting owners: %s", err)
}

return nil
}
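In place of the removed hand-rolled continuation-token loop, the data source now lets the SDK paginate via `ListObjectsV2Pages`. Below is a minimal, standalone sketch of that capping pattern — not the provider's exact code; the bucket name, the total-key budget, and the standalone `main` are assumptions for illustration:

```go
// Sketch: cap the total number of object keys collected while the SDK
// drives pagination. Bucket name and budget are hypothetical.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

const keyRequestPageSize = 1000 // S3's maximum page size per ListObjectsV2 request

func main() {
	sess := session.Must(session.NewSession())
	conn := s3.New(sess)

	maxKeys := int64(2500) // total-key budget across all pages
	input := s3.ListObjectsV2Input{
		Bucket:  aws.String("example-bucket"), // hypothetical bucket
		MaxKeys: aws.Int64(keyRequestPageSize),
	}

	var keys []string
	err := conn.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			keys = append(keys, aws.StringValue(object.Key))
			if int64(len(keys)) >= maxKeys {
				return false // budget spent; stop paginating
			}
		}
		return !lastPage // keep going while S3 reports more pages
	})
	if err != nil {
		log.Fatalf("listing objects: %s", err)
	}
	fmt.Printf("collected %d keys\n", len(keys))
}
```

Stopping inside the page callback avoids requesting pages past the budget, which is the same concern the commit's `maxKeys = maxKeys - aws.Int64Value(page.KeyCount)` bookkeeping addresses.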
64 changes: 43 additions & 21 deletions aws/data_source_aws_s3_bucket_objects_test.go
@@ -11,22 +11,20 @@ import (

func TestAccDataSourceAWSS3BucketObjects_basic(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigBasic(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigResources(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigBasic(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr(
"data.aws_s3_bucket_objects.yesh",
"arn",
fmt.Sprintf("arn:aws:s3:::tf-objects-test-bucket-%d", rInt),
),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "2"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.0", "arch/navajo/north_window"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.1", "arch/navajo/sand_dune"),
@@ -38,15 +36,18 @@ func TestAccDataSourceAWSS3BucketObjects_basic(t *testing.T) {

func TestAccDataSourceAWSS3BucketObjects_all(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigAll(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigResources(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigAll(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "7"),
@@ -65,15 +66,18 @@ func TestAccDataSourceAWSS3BucketObjects_all(t *testing.T) {

func TestAccDataSourceAWSS3BucketObjects_prefixes(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigPrefixes(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigResources(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigPrefixes(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "1"),
@@ -91,15 +95,18 @@ func TestAccDataSourceAWSS3BucketObjects_prefixes(t *testing.T) {

func TestAccDataSourceAWSS3BucketObjects_encoded(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigEncoded(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigExtraResource(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigEncoded(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "2"),
@@ -113,15 +120,18 @@ func TestAccDataSourceAWSS3BucketObjects_encoded(t *testing.T) {

func TestAccDataSourceAWSS3BucketObjects_maxKeys(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigMaxKeys(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigResources(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigMaxKeys(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "2"),
@@ -135,15 +145,18 @@ func TestAccDataSourceAWSS3BucketObjects_maxKeys(t *testing.T) {

func TestAccDataSourceAWSS3BucketObjects_startAfter(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigStartAfter(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigResources(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigStartAfter(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "1"),
@@ -156,15 +169,18 @@ func TestAccDataSourceAWSS3BucketObjects_startAfter(t *testing.T) {

func TestAccDataSourceAWSS3BucketObjects_fetchOwner(t *testing.T) {
rInt := acctest.RandInt()
basic := testAccAWSDataSourceS3ObjectsConfigOwners(rInt)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: basic,
Config: testAccAWSDataSourceS3ObjectsConfigResources(rInt), // NOTE: contains no data source
// Does not need Check
},
{
Config: testAccAWSDataSourceS3ObjectsConfigOwners(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectsDataSourceExists("data.aws_s3_bucket_objects.yesh"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "2"),
@@ -274,7 +290,7 @@ data "aws_s3_bucket_objects" "yesh" {
`, testAccAWSDataSourceS3ObjectsConfigResources(randInt))
}

func testAccAWSDataSourceS3ObjectsConfigEncoded(randInt int) string {
func testAccAWSDataSourceS3ObjectsConfigExtraResource(randInt int) string {
return fmt.Sprintf(`
%s
@@ -283,13 +299,19 @@ resource "aws_s3_bucket_object" "object8" {
key = "arch/ru b ic on"
content = "Goose Island"
}
`, testAccAWSDataSourceS3ObjectsConfigResources(randInt))
}

func testAccAWSDataSourceS3ObjectsConfigEncoded(randInt int) string {
return fmt.Sprintf(`
%s
data "aws_s3_bucket_objects" "yesh" {
bucket = "${aws_s3_bucket.objects_bucket.id}"
encoding_type = "url"
prefix = "arch/ru"
}
`, testAccAWSDataSourceS3ObjectsConfigResources(randInt))
`, testAccAWSDataSourceS3ObjectsConfigExtraResource(randInt))
}

func testAccAWSDataSourceS3ObjectsConfigMaxKeys(randInt int) string {
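Each test case above now runs in two steps: step one applies a config containing only the bucket and objects (no data source), and step two applies the same config plus the data source and its checks, so the objects exist before the data source reads them. A sketch of the pattern, meant to live in the provider's test package; the config helper names are hypothetical stand-ins for the real functions:

```go
// Sketch of the two-step acceptance-test pattern used above. Assumes the
// provider test package context (acctest, testAccPreCheck, testAccProviders);
// testAccExampleResourcesConfig / testAccExampleDataSourceConfig are
// hypothetical stand-ins for the real config helpers.
func TestAccExampleDataSource_twoStep(t *testing.T) {
	rInt := acctest.RandInt()

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                  func() { testAccPreCheck(t) },
		Providers:                 testAccProviders,
		PreventPostDestroyRefresh: true,
		Steps: []resource.TestStep{
			{
				// Step 1: create only the resources; no data source yet,
				// so there is nothing to read against an empty bucket.
				Config: testAccExampleResourcesConfig(rInt),
			},
			{
				// Step 2: same resources plus the data source, checked here.
				Config: testAccExampleDataSourceConfig(rInt),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("data.aws_s3_bucket_objects.yesh", "keys.#", "2"),
				),
			},
		},
	})
}
```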
8 changes: 3 additions & 5 deletions website/docs/d/s3_bucket_objects.html.markdown
@@ -8,6 +8,8 @@ description: |-

# Data Source: aws_s3_bucket_objects

~> **NOTE on `max_keys`:** Retrieving very large numbers of keys can adversely affect Terraform's performance.

The bucket-objects data source returns keys (i.e., file names) and other metadata about objects in an S3 bucket.

## Example Usage
@@ -34,18 +36,14 @@ The following arguments are supported:
* `prefix` - (Optional) Limits results to object keys with this prefix (Default: none)
* `delimiter` - (Optional) A character used to group keys (Default: none)
* `encoding_type` - (Optional) Encodes keys using this method (Default: none; besides none, only "url" can be used)
* `max_keys` - (Optional) Maximum object keys to return or `-1` to retrieve all keys (Default: 1000)

~> **NOTE on `max_keys`:** Retrieving very large numbers of keys can adversely affect Terraform's performance.

* `max_keys` - (Optional) Maximum object keys to return (Default: 1000)
* `start_after` - (Optional) Returns key names lexicographically after a specific object key in your bucket (Default: none; S3 lists object keys in UTF-8 character encoding in lexicographical order)
* `fetch_owner` - (Optional) Boolean specifying whether to populate the owner list (Default: false)

## Attributes Reference

In addition to all arguments above, the following attributes are exported:

* `arn` - ARN of the bucket in the format `arn:aws:s3:::bucketname`
* `keys` - List of strings representing object keys
* `common_prefixes` - List of any keys between `prefix` and the next occurrence of `delimiter` (i.e., similar to subdirectories of the `prefix` "directory"); the list is only returned when you specify `delimiter`
* `owners` - List of strings representing object owner IDs (see `fetch_owner` above)
