diff --git a/.changelog/34504.txt b/.changelog/34504.txt new file mode 100644 index 00000000000..5007d2339ef --- /dev/null +++ b/.changelog/34504.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket_logging: Add `target_object_key_format` configuration block to support [automatic date-based partitioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html#server-access-logging-overview) +``` \ No newline at end of file diff --git a/.ci/.golangci2.yml b/.ci/.golangci2.yml index 5a79b988b26..dc927d801be 100644 --- a/.ci/.golangci2.yml +++ b/.ci/.golangci2.yml @@ -86,6 +86,10 @@ issues: - staticcheck path: "internal/service/s3" text: "SA1019: \\w+.(\\w+) is deprecated: (\\w+) has been deprecated" + - linters: + - staticcheck + path: "internal/service/s3" + text: "SA1019: \\w+.(\\w+) is deprecated: This member has been deprecated" - linters: - staticcheck path: internal/service/securityhub/ diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 601064d418d..70e41d09098 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -43,6 +43,10 @@ import ( const ( resNameBucket = "Bucket" + + // General timeout for S3 bucket changes to propagate. + // See https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel. + s3BucketPropagationTimeout = 2 * time.Minute // nosemgrep:ci.s3-in-const-name, ci.s3-in-var-name ) // @SDKResource("aws_s3_bucket", name="Bucket") @@ -1047,7 +1051,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration, errCodeNotImplemented, errCodeXNotImplemented) { + if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNoSuchLifecycleConfiguration, errCodeNotImplemented, errCodeXNotImplemented) { return sdkdiag.AppendErrorf(diags, "getting S3 Bucket (%s) Lifecycle Configuration: %s", d.Id(), err) } @@ -1076,7 +1080,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented) { + if err != nil && !tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented) { return sdkdiag.AppendErrorf(diags, "getting S3 Bucket replication: %s", err) } diff --git a/internal/service/s3/bucket_lifecycle_configuration.go b/internal/service/s3/bucket_lifecycle_configuration.go index 5b04bf0b61d..caf78a69289 100644 --- a/internal/service/s3/bucket_lifecycle_configuration.go +++ b/internal/service/s3/bucket_lifecycle_configuration.go @@ -7,22 +7,25 @@ import ( "context" "fmt" "log" - "reflect" + "strconv" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
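The new `s3BucketPropagationTimeout` constant introduced in bucket.go above feeds the provider's internal `tfresource.RetryWhenAWSErrCodeEquals` helper, which keeps re-issuing a call while S3 returns a transient error code such as `NoSuchBucket`, up to the propagation window. A minimal self-contained sketch of that pattern, assuming only `smithy-go` error unwrapping (`retryOnCode` is an illustrative name, not the provider's helper):

```go
package propagation

import (
	"context"
	"errors"
	"time"

	"github.com/aws/smithy-go"
)

// retryOnCode retries f while it fails with the given AWS error code,
// giving up once the timeout window has elapsed.
func retryOnCode(ctx context.Context, timeout time.Duration, code string, f func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := f()
		var ae smithy.APIError
		// Only the named code is treated as transient; success or any
		// other error is terminal.
		if err == nil || !errors.As(err, &ae) || ae.ErrorCode() != code {
			return err
		}
		if time.Now().After(deadline) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}
```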
"github.com/hashicorp/terraform-provider-aws/internal/enum" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" "github.com/hashicorp/terraform-provider-aws/internal/verify" + "golang.org/x/exp/slices" ) // @SDKResource("aws_s3_bucket_lifecycle_configuration") @@ -32,10 +35,16 @@ func ResourceBucketLifecycleConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketLifecycleConfigurationRead, UpdateWithoutTimeout: resourceBucketLifecycleConfigurationUpdate, DeleteWithoutTimeout: resourceBucketLifecycleConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(3 * time.Minute), + Update: schema.DefaultTimeout(3 * time.Minute), + }, + Schema: map[string]*schema.Schema{ "bucket": { Type: schema.TypeString, @@ -43,14 +52,12 @@ func ResourceBucketLifecycleConfiguration() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 63), }, - "expected_bucket_owner": { Type: schema.TypeString, Optional: true, ForceNew: true, ValidateFunc: verify.ValidAccountID, }, - "rule": { Type: schema.TypeList, Required: true, @@ -160,13 +167,11 @@ func ResourceBucketLifecycleConfiguration() *schema.Resource { }, }, }, - "id": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 255), }, - "noncurrent_version_expiration": { Type: schema.TypeList, Optional: true, @@ -202,29 +207,23 @@ func ResourceBucketLifecycleConfiguration() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.TransitionStorageClass](), }, }, }, }, - "prefix": { Type: schema.TypeString, Optional: true, Deprecated: "Use filter instead", }, - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - LifecycleRuleStatusDisabled, - LifecycleRuleStatusEnabled, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(lifecycleRuleStatus_Values(), false), }, - "transition": { Type: schema.TypeSet, Optional: true, @@ -241,9 +240,9 @@ func ResourceBucketLifecycleConfiguration() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.TransitionStorageClass](), }, }, }, @@ -256,70 +255,62 @@ func ResourceBucketLifecycleConfiguration() *schema.Resource { } func resourceBucketLifecycleConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) expectedBucketOwner := d.Get("expected_bucket_owner").(string) - - rules, err := ExpandLifecycleRules(ctx, d.Get("rule").([]interface{})) - if err != nil { - return diag.Errorf("creating S3 Lifecycle Configuration for bucket (%s): %s", bucket, err) - } - + rules := expandLifecycleRules(ctx, 
d.Get("rule").([]interface{})) input := &s3.PutBucketLifecycleConfigurationInput{ Bucket: aws.String(bucket), - LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + LifecycleConfiguration: &types.BucketLifecycleConfiguration{ Rules: rules, }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.PutBucketLifecycleConfigurationWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketLifecycleConfiguration(ctx, input) + }, errCodeNoSuchBucket) if err != nil { - return diag.Errorf("creating S3 Lifecycle Configuration for bucket (%s): %s", bucket, err) + return diag.Errorf("creating S3 Bucket (%s) Lifecycle Configuration: %s", bucket, err) } d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - if err = waitForLifecycleConfigurationRulesStatus(ctx, conn, bucket, expectedBucketOwner, rules); err != nil { - return diag.Errorf("waiting for S3 Lifecycle Configuration for bucket (%s) to reach expected rules status after update: %s", d.Id(), err) + _, err = waitLifecycleRulesEquals(ctx, conn, bucket, expectedBucketOwner, rules, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + diag.Errorf("waiting for S3 Bucket Lifecycle Configuration (%s) create: %s", d.Id(), err) } return resourceBucketLifecycleConfigurationRead(ctx, d, meta) } func resourceBucketLifecycleConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - input := &s3.GetBucketLifecycleConfigurationInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - var lastOutput, output *s3.GetBucketLifecycleConfigurationOutput + const ( + lifecycleConfigurationExtraRetryDelay = 5 * time.Second + lifecycleConfigurationRulesSteadyTimeout = 2 * time.Minute + ) + var lastOutput, output []types.LifecycleRule err = retry.RetryContext(ctx, lifecycleConfigurationRulesSteadyTimeout, func() *retry.RetryError { var err error time.Sleep(lifecycleConfigurationExtraRetryDelay) - output, err = conn.GetBucketLifecycleConfigurationWithContext(ctx, input) + output, err = findLifecycleRules(ctx, conn, bucket, expectedBucketOwner) - if d.IsNewResource() && tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) { + if d.IsNewResource() && tfresource.NotFound(err) { return retry.RetryableError(err) } @@ -327,31 +318,31 @@ func resourceBucketLifecycleConfigurationRead(ctx context.Context, d *schema.Res return retry.NonRetryableError(err) } - if lastOutput == nil || !reflect.DeepEqual(*lastOutput, *output) { + if lastOutput == nil || !lifecycleRulesEqual(lastOutput, output) { lastOutput = output - return retry.RetryableError(fmt.Errorf("bucket lifecycle configuration has not stablized; trying again")) + return retry.RetryableError(fmt.Errorf("S3 Bucket Lifecycle Configuration (%s) has not stablized; retrying", d.Id())) } return nil }) if tfresource.TimedOut(err) { - output, err = conn.GetBucketLifecycleConfigurationWithContext(ctx, input) + output, err = findLifecycleRules(ctx, conn, bucket, expectedBucketOwner) } - if 
!d.IsNewResource() && tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Lifecycle Configuration (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return diag.Errorf("getting S3 Bucket Lifecycle Configuration (%s): %s", d.Id(), err) + return diag.Errorf("reading S3 Bucket Lifecycle Configuration (%s): %s", d.Id(), err) } d.Set("bucket", bucket) d.Set("expected_bucket_owner", expectedBucketOwner) - if err := d.Set("rule", FlattenLifecycleRules(ctx, output.Rules)); err != nil { + if err := d.Set("rule", flattenLifecycleRules(ctx, output)); err != nil { return diag.Errorf("setting rule: %s", err) } @@ -359,46 +350,43 @@ func resourceBucketLifecycleConfigurationRead(ctx context.Context, d *schema.Res } func resourceBucketLifecycleConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - rules, err := ExpandLifecycleRules(ctx, d.Get("rule").([]interface{})) - if err != nil { - return diag.Errorf("updating S3 Bucket Lifecycle Configuration rule: %s", err) - } - + rules := expandLifecycleRules(ctx, d.Get("rule").([]interface{})) input := &s3.PutBucketLifecycleConfigurationInput{ Bucket: aws.String(bucket), - LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + LifecycleConfiguration: &types.BucketLifecycleConfiguration{ Rules: rules, }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.PutBucketLifecycleConfigurationWithContext(ctx, input) - }, ErrCodeNoSuchLifecycleConfiguration) + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketLifecycleConfiguration(ctx, input) + }, errCodeNoSuchLifecycleConfiguration) if err != nil { return diag.Errorf("updating S3 Bucket Lifecycle Configuration (%s): %s", d.Id(), err) } - if err := waitForLifecycleConfigurationRulesStatus(ctx, conn, bucket, expectedBucketOwner, rules); err != nil { - return diag.Errorf("waiting for S3 Lifecycle Configuration for bucket (%s) to reach expected rules status after update: %s", d.Id(), err) + _, err = waitLifecycleRulesEquals(ctx, conn, bucket, expectedBucketOwner, rules, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket Lifecycle Configuration (%s) update: %s", d.Id(), err) + } return resourceBucketLifecycleConfigurationRead(ctx, d, meta) } func resourceBucketLifecycleConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { @@ -408,14 +396,13 @@ func resourceBucketLifecycleConfigurationDelete(ctx context.Context, d *schema.R input := &s3.DeleteBucketLifecycleInput{ Bucket: aws.String(bucket), } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.DeleteBucketLifecycleWithContext(ctx, input) + _, err = conn.DeleteBucketLifecycle(ctx, input) - if tfawserr.ErrCodeEquals(err,
ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchLifecycleConfiguration) { return nil } @@ -423,6 +410,14 @@ func resourceBucketLifecycleConfigurationDelete(ctx context.Context, d *schema.R return diag.Errorf("deleting S3 Bucket Lifecycle Configuration (%s): %s", d.Id(), err) } + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findLifecycleRules(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket Lifecycle Configuration (%s) delete: %s", d.Id(), err) + } + return nil } @@ -433,14 +428,14 @@ func resourceBucketLifecycleConfigurationDelete(ctx context.Context, d *schema.R // which are incorrectly suppressed when using the verify.SuppressMissingOptionalConfigurationBlock method. func suppressMissingFilterConfigurationBlock(k, old, new string, d *schema.ResourceData) bool { if strings.HasSuffix(k, "filter.#") { - o, n := d.GetChange(k) - oVal, nVal := o.(int), n.(int) + oraw, nraw := d.GetChange(k) + o, n := oraw.(int), nraw.(int) - if oVal == 1 && nVal == 0 { + if o == 1 && n == 0 { return true } - if oVal == 1 && nVal == 1 { + if o == 1 && n == 1 { return old == "1" && new == "0" } @@ -448,3 +443,609 @@ func suppressMissingFilterConfigurationBlock } return false } + +func findLifecycleRules(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) ([]types.LifecycleRule, error) { + input := &s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketLifecycleConfiguration(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchLifecycleConfiguration) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || len(output.Rules) == 0 { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Rules, nil +} + +func lifecycleRulesEqual(rules1, rules2 []types.LifecycleRule) bool { + if len(rules1) != len(rules2) { + return false + } + + for _, rule1 := range rules1 { + // We consider two LifecycleRules equal if their IDs and Statuses are equal.
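In other words, equality here is order-insensitive and keyed only on each rule's ID and Status, presumably because server-side reads can reorder rules and populate defaulted fields. A small illustration of the same check on a simplified rule type, assuming Go 1.21's standard-library `slices` package rather than the `golang.org/x/exp/slices` import this file takes (the actual implementation continues below):

```go
package sketch

import "slices"

type rule struct{ ID, Status string }

// rulesEqual reports whether a and b contain the same (ID, Status) pairs,
// in any order. Differences in any other field are deliberately ignored.
func rulesEqual(a, b []rule) bool {
	if len(a) != len(b) {
		return false
	}
	for _, r1 := range a {
		if !slices.ContainsFunc(b, func(r2 rule) bool {
			return r1.ID == r2.ID && r1.Status == r2.Status
		}) {
			return false
		}
	}
	return true
}
```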
+ if !slices.ContainsFunc(rules2, func(rule2 types.LifecycleRule) bool { + return aws.ToString(rule1.ID) == aws.ToString(rule2.ID) && rule1.Status == rule2.Status + }) { + return false + } + } + + return true +} + +func statusLifecycleRulesEquals(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string, rules []types.LifecycleRule) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findLifecycleRules(ctx, conn, bucket, expectedBucketOwner) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, strconv.FormatBool(lifecycleRulesEqual(output, rules)), nil + } +} + +func waitLifecycleRulesEquals(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string, rules []types.LifecycleRule, timeout time.Duration) ([]types.LifecycleRule, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Target: []string{strconv.FormatBool(true)}, + Refresh: statusLifecycleRulesEquals(ctx, conn, bucket, expectedBucketOwner, rules), + Timeout: timeout, + MinTimeout: 10 * time.Second, + ContinuousTargetOccurence: 3, + NotFoundChecks: 20, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.([]types.LifecycleRule); ok { + return output, err + } + + return nil, err +} + +const ( + lifecycleRuleStatusDisabled = "Disabled" + lifecycleRuleStatusEnabled = "Enabled" +) + +func lifecycleRuleStatus_Values() []string { + return []string{ + lifecycleRuleStatusDisabled, + lifecycleRuleStatusEnabled, + } +} + +func expandLifecycleRules(ctx context.Context, l []interface{}) []types.LifecycleRule { + if len(l) == 0 || l[0] == nil { + return nil + } + + var results []types.LifecycleRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + result := types.LifecycleRule{} + + if v, ok := tfMap["abort_incomplete_multipart_upload"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.AbortIncompleteMultipartUpload = expandLifecycleRuleAbortIncompleteMultipartUpload(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 { + result.Expiration = expandLifecycleRuleExpiration(v) + } + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { + result.Filter = expandLifecycleRuleFilter(ctx, v) + } + + if v, ok := tfMap["prefix"].(string); ok && result.Filter == nil { + // If neither the filter block nor the prefix is specified, + // apply the default behavior from v3.x of the provider; + // otherwise, set the prefix as specified in Terraform.
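To make the branch below concrete, these are the two request shapes it can produce; a minimal sketch using the same SDK v2 union types this file imports (illustrative helpers, not provider code):

```go
package sketch

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// defaultFilterRule mirrors the v3.x default: with no filter block and no
// prefix, the rule is sent with a Filter holding an empty Prefix member.
func defaultFilterRule() types.LifecycleRule {
	return types.LifecycleRule{
		Filter: &types.LifecycleRuleFilterMemberPrefix{Value: ""},
	}
}

// explicitPrefixRule shows the other branch: a non-empty "prefix" is sent
// through the deprecated top-level Prefix field instead of the Filter.
func explicitPrefixRule(prefix string) types.LifecycleRule {
	return types.LifecycleRule{Prefix: aws.String(prefix)}
}
```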
+ if v == "" { + result.Filter = &types.LifecycleRuleFilterMemberPrefix{ + Value: v, + } + } else { + result.Prefix = aws.String(v) + } + } + + if v, ok := tfMap["id"].(string); ok { + result.ID = aws.String(v) + } + + if v, ok := tfMap["noncurrent_version_expiration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.NoncurrentVersionExpiration = expandLifecycleRuleNoncurrentVersionExpiration(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["noncurrent_version_transition"].(*schema.Set); ok && v.Len() > 0 { + result.NoncurrentVersionTransitions = expandLifecycleRuleNoncurrentVersionTransitions(v.List()) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.ExpirationStatus(v) + } + + if v, ok := tfMap["transition"].(*schema.Set); ok && v.Len() > 0 { + result.Transitions = expandLifecycleRuleTransitions(v.List()) + } + + results = append(results, result) + } + + return results +} + +func expandLifecycleRuleAbortIncompleteMultipartUpload(m map[string]interface{}) *types.AbortIncompleteMultipartUpload { + if len(m) == 0 { + return nil + } + + result := &types.AbortIncompleteMultipartUpload{} + + if v, ok := m["days_after_initiation"].(int); ok { + result.DaysAfterInitiation = aws.Int32(int32(v)) + } + + return result +} + +func expandLifecycleRuleExpiration(l []interface{}) *types.LifecycleExpiration { + if len(l) == 0 { + return nil + } + + result := &types.LifecycleExpiration{} + + if l[0] == nil { + return result + } + + m := l[0].(map[string]interface{}) + + if v, ok := m["date"].(string); ok && v != "" { + t, _ := time.Parse(time.RFC3339, v) + result.Date = aws.Time(t) + } + + if v, ok := m["days"].(int); ok && v > 0 { + result.Days = aws.Int32(int32(v)) + } + + // This cannot be specified with Days or Date + if v, ok := m["expired_object_delete_marker"].(bool); ok && result.Date == nil && aws.ToInt32(result.Days) == 0 { + result.ExpiredObjectDeleteMarker = aws.Bool(v) + } + + return result +} + +func expandLifecycleRuleFilter(ctx context.Context, l []interface{}) types.LifecycleRuleFilter { + if len(l) == 0 { + return nil + } + + var result types.LifecycleRuleFilter + + if l[0] == nil { + return result + } + + m := l[0].(map[string]interface{}) + + if v, ok := m["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result = expandLifecycleRuleFilterMemberAnd(ctx, v[0].(map[string]interface{})) + } + + if v, null, _ := nullable.Int(m["object_size_greater_than"].(string)).Value(); !null && v >= 0 { + result = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{ + Value: v, + } + } + + if v, null, _ := nullable.Int(m["object_size_less_than"].(string)).Value(); !null && v > 0 { + result = &types.LifecycleRuleFilterMemberObjectSizeLessThan{ + Value: v, + } + } + + if v, ok := m["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result = expandLifecycleRuleFilterMemberTag(v[0].(map[string]interface{})) + } + + // Per AWS S3 API, "A Filter must have exactly one of Prefix, Tag, or And specified"; + // Specifying more than one of the listed parameters results in a MalformedXML error. + // In practice, this also includes ObjectSizeGreaterThan and ObjectSizeLessThan. 
+ if v, ok := m["prefix"].(string); ok && result == nil { + result = &types.LifecycleRuleFilterMemberPrefix{ + Value: v, + } + } + + return result +} + +func expandLifecycleRuleFilterMemberAnd(ctx context.Context, m map[string]interface{}) *types.LifecycleRuleFilterMemberAnd { + if len(m) == 0 { + return nil + } + + result := &types.LifecycleRuleFilterMemberAnd{ + Value: types.LifecycleRuleAndOperator{}, + } + + if v, ok := m["object_size_greater_than"].(int); ok && v > 0 { + result.Value.ObjectSizeGreaterThan = aws.Int64(int64(v)) + } + + if v, ok := m["object_size_less_than"].(int); ok && v > 0 { + result.Value.ObjectSizeLessThan = aws.Int64(int64(v)) + } + + if v, ok := m["prefix"].(string); ok { + result.Value.Prefix = aws.String(v) + } + + if v, ok := m["tags"].(map[string]interface{}); ok && len(v) > 0 { + tags := tagsV2(tftags.New(ctx, v).IgnoreAWS()) + if len(tags) > 0 { + result.Value.Tags = tags + } + } + + return result +} + +func expandLifecycleRuleFilterMemberTag(m map[string]interface{}) *types.LifecycleRuleFilterMemberTag { + if len(m) == 0 { + return nil + } + + result := &types.LifecycleRuleFilterMemberTag{ + Value: types.Tag{}, + } + + if key, ok := m["key"].(string); ok { + result.Value.Key = aws.String(key) + } + + if value, ok := m["value"].(string); ok { + result.Value.Value = aws.String(value) + } + + return result +} + +func expandLifecycleRuleNoncurrentVersionExpiration(m map[string]interface{}) *types.NoncurrentVersionExpiration { + if len(m) == 0 { + return nil + } + + result := &types.NoncurrentVersionExpiration{} + + if v, null, _ := nullable.Int(m["newer_noncurrent_versions"].(string)).Value(); !null && v > 0 { + result.NewerNoncurrentVersions = aws.Int32(int32(v)) + } + + if v, ok := m["noncurrent_days"].(int); ok { + result.NoncurrentDays = aws.Int32(int32(v)) + } + + return result +} + +func expandLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []types.NoncurrentVersionTransition { + if len(l) == 0 || l[0] == nil { + return nil + } + + var results []types.NoncurrentVersionTransition + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + transition := types.NoncurrentVersionTransition{} + + if v, null, _ := nullable.Int(tfMap["newer_noncurrent_versions"].(string)).Value(); !null && v > 0 { + transition.NewerNoncurrentVersions = aws.Int32(int32(v)) + } + + if v, ok := tfMap["noncurrent_days"].(int); ok { + transition.NoncurrentDays = aws.Int32(int32(v)) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + transition.StorageClass = types.TransitionStorageClass(v) + } + + results = append(results, transition) + } + + return results +} + +func expandLifecycleRuleTransitions(l []interface{}) []types.Transition { + if len(l) == 0 || l[0] == nil { + return nil + } + + var results []types.Transition + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + transition := types.Transition{} + + if v, ok := tfMap["date"].(string); ok && v != "" { + t, _ := time.Parse(time.RFC3339, v) + transition.Date = aws.Time(t) + } + + // Only one of "date" and "days" can be configured + // so only set the transition.Days value when transition.Date is nil + // By default, tfMap["days"] = 0 if not explicitly configured in terraform. 
+ if v, ok := tfMap["days"].(int); ok && v >= 0 && transition.Date == nil { + transition.Days = aws.Int32(int32(v)) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + transition.StorageClass = types.TransitionStorageClass(v) + } + + results = append(results, transition) + } + + return results +} + +func flattenLifecycleRules(ctx context.Context, rules []types.LifecycleRule) []interface{} { + if len(rules) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, rule := range rules { + m := map[string]interface{}{ + "status": rule.Status, + } + + if rule.AbortIncompleteMultipartUpload != nil { + m["abort_incomplete_multipart_upload"] = flattenLifecycleRuleAbortIncompleteMultipartUpload(rule.AbortIncompleteMultipartUpload) + } + + if rule.Expiration != nil { + m["expiration"] = flattenLifecycleRuleExpiration(rule.Expiration) + } + + if rule.Filter != nil { + m["filter"] = flattenLifecycleRuleFilter(ctx, rule.Filter) + } + + if rule.ID != nil { + m["id"] = aws.ToString(rule.ID) + } + + if rule.NoncurrentVersionExpiration != nil { + m["noncurrent_version_expiration"] = flattenLifecycleRuleNoncurrentVersionExpiration(rule.NoncurrentVersionExpiration) + } + + if rule.NoncurrentVersionTransitions != nil { + m["noncurrent_version_transition"] = flattenLifecycleRuleNoncurrentVersionTransitions(rule.NoncurrentVersionTransitions) + } + + if rule.Prefix != nil { + m["prefix"] = aws.ToString(rule.Prefix) + } + + if rule.Transitions != nil { + m["transition"] = flattenLifecycleRuleTransitions(rule.Transitions) + } + + results = append(results, m) + } + + return results +} + +func flattenLifecycleRuleAbortIncompleteMultipartUpload(u *types.AbortIncompleteMultipartUpload) []interface{} { + if u == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if u.DaysAfterInitiation != nil { + m["days_after_initiation"] = int(aws.ToInt32(u.DaysAfterInitiation)) + } + + return []interface{}{m} +} + +func flattenLifecycleRuleExpiration(expiration *types.LifecycleExpiration) []interface{} { + if expiration == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if expiration.Date != nil { + m["date"] = expiration.Date.Format(time.RFC3339) + } + + if expiration.Days != nil { + m["days"] = int(aws.ToInt32(expiration.Days)) + } + + if expiration.ExpiredObjectDeleteMarker != nil { + m["expired_object_delete_marker"] = aws.ToBool(expiration.ExpiredObjectDeleteMarker) + } + + return []interface{}{m} +} + +func flattenLifecycleRuleFilter(ctx context.Context, filter types.LifecycleRuleFilter) []interface{} { + if filter == nil { + return nil + } + + m := make(map[string]interface{}) + + switch v := filter.(type) { + case *types.LifecycleRuleFilterMemberAnd: + m["and"] = flattenLifecycleRuleFilterMemberAnd(ctx, v) + case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: + m["object_size_greater_than"] = strconv.FormatInt(v.Value, 10) + case *types.LifecycleRuleFilterMemberObjectSizeLessThan: + m["object_size_less_than"] = strconv.FormatInt(v.Value, 10) + case *types.LifecycleRuleFilterMemberPrefix: + m["prefix"] = v.Value + case *types.LifecycleRuleFilterMemberTag: + m["tag"] = flattenLifecycleRuleFilterMemberTag(v) + default: + return nil + } + + return []interface{}{m} +} + +func flattenLifecycleRuleFilterMemberAnd(ctx context.Context, andOp *types.LifecycleRuleFilterMemberAnd) []interface{} { + if andOp == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "object_size_greater_than": 
andOp.Value.ObjectSizeGreaterThan, + "object_size_less_than": andOp.Value.ObjectSizeLessThan, + } + + if v := andOp.Value.Prefix; v != nil { + m["prefix"] = aws.ToString(v) + } + + if v := andOp.Value.Tags; v != nil { + m["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() + } + + return []interface{}{m} +} + +func flattenLifecycleRuleFilterMemberTag(op *types.LifecycleRuleFilterMemberTag) []interface{} { + if op == nil { + return nil + } + + m := make(map[string]interface{}) + + if v := op.Value.Key; v != nil { + m["key"] = aws.ToString(v) + } + + if v := op.Value.Value; v != nil { + m["value"] = aws.ToString(v) + } + + return []interface{}{m} +} + +func flattenLifecycleRuleNoncurrentVersionExpiration(expiration *types.NoncurrentVersionExpiration) []interface{} { + if expiration == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if expiration.NewerNoncurrentVersions != nil { + m["newer_noncurrent_versions"] = strconv.FormatInt(int64(aws.ToInt32(expiration.NewerNoncurrentVersions)), 10) + } + + if expiration.NoncurrentDays != nil { + m["noncurrent_days"] = int(aws.ToInt32(expiration.NoncurrentDays)) + } + + return []interface{}{m} +} + +func flattenLifecycleRuleNoncurrentVersionTransitions(transitions []types.NoncurrentVersionTransition) []interface{} { + if len(transitions) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, transition := range transitions { + m := map[string]interface{}{ + "storage_class": transition.StorageClass, + } + + if transition.NewerNoncurrentVersions != nil { + m["newer_noncurrent_versions"] = strconv.FormatInt(int64(aws.ToInt32(transition.NewerNoncurrentVersions)), 10) + } + + if transition.NoncurrentDays != nil { + m["noncurrent_days"] = int(aws.ToInt32(transition.NoncurrentDays)) + } + + results = append(results, m) + } + + return results +} + +func flattenLifecycleRuleTransitions(transitions []types.Transition) []interface{} { + if len(transitions) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, transition := range transitions { + m := map[string]interface{}{ + "days": transition.Days, + "storage_class": transition.StorageClass, + } + + if transition.Date != nil { + m["date"] = transition.Date.Format(time.RFC3339) + } + + results = append(results, m) + } + + return results +} diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go b/internal/service/s3/bucket_lifecycle_configuration_test.go index bb59479fcf5..b6f46424dc4 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -9,9 +9,7 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -19,6 +17,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3BucketLifecycleConfiguration_basic(t *testing.T) { @@ -28,7 +27,7 @@ func 
TestAccS3BucketLifecycleConfiguration_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -64,7 +63,7 @@ func TestAccS3BucketLifecycleConfiguration_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -90,7 +89,7 @@ func TestAccS3BucketLifecycleConfiguration_filterWithPrefix(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -145,7 +144,7 @@ func TestAccS3BucketLifecycleConfiguration_Filter_ObjectSizeGreaterThan(t *testi resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -181,7 +180,7 @@ func TestAccS3BucketLifecycleConfiguration_Filter_ObjectSizeGreaterThanZero(t *t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -217,7 +216,7 @@ func TestAccS3BucketLifecycleConfiguration_Filter_ObjectSizeLessThan(t *testing. 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -253,7 +252,7 @@ func TestAccS3BucketLifecycleConfiguration_Filter_ObjectSizeRange(t *testing.T) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -291,7 +290,7 @@ func TestAccS3BucketLifecycleConfiguration_Filter_ObjectSizeRangeAndPrefix(t *te resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -328,7 +327,7 @@ func TestAccS3BucketLifecycleConfiguration_disableRule(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -374,7 +373,7 @@ func TestAccS3BucketLifecycleConfiguration_multipleRules(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -396,11 +395,11 @@ func TestAccS3BucketLifecycleConfiguration_multipleRules(t *testing.T) { }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*.transition.*", map[string]string{ "days": "30", - "storage_class": s3.StorageClassStandardIa, + "storage_class": string(types.StorageClassStandardIa), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*.transition.*", map[string]string{ "days": "60", - "storage_class": s3.StorageClassGlacier, + "storage_class": string(types.StorageClassGlacier), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "tmp", @@ -429,12 +428,12 @@ func TestAccS3BucketLifecycleConfiguration_multipleRules_noFilterOrPrefix(t *tes resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketLifecycleConfigurationConfig_multipleRulesNoFilterOrPrefix(rName, s3.ReplicationRuleStatusEnabled), + Config: testAccBucketLifecycleConfigurationConfig_multipleRulesNoFilterOrPrefix(rName, tfs3.LifecycleRuleStatusEnabled), Check: resource.ComposeTestCheckFunc( 
testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), @@ -460,7 +459,7 @@ func TestAccS3BucketLifecycleConfiguration_nonCurrentVersionExpiration(t *testin resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -490,7 +489,7 @@ func TestAccS3BucketLifecycleConfiguration_nonCurrentVersionTransition(t *testin resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -503,11 +502,11 @@ func TestAccS3BucketLifecycleConfiguration_nonCurrentVersionTransition(t *testin }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*.noncurrent_version_transition.*", map[string]string{ "noncurrent_days": "30", - "storage_class": s3.StorageClassStandardIa, + "storage_class": string(types.StorageClassStandardIa), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*.noncurrent_version_transition.*", map[string]string{ "noncurrent_days": "60", - "storage_class": s3.StorageClassGlacier, + "storage_class": string(types.StorageClassGlacier), }), ), }, @@ -528,7 +527,7 @@ func TestAccS3BucketLifecycleConfiguration_prefix(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -564,7 +563,7 @@ func TestAccS3BucketLifecycleConfiguration_Filter_Tag(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -601,7 +600,7 @@ func TestAccS3BucketLifecycleConfiguration_RuleExpiration_expireMarkerOnly(t *te resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -647,7 +646,7 @@ func TestAccS3BucketLifecycleConfiguration_RuleExpiration_emptyBlock(t *testing. 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -677,7 +676,7 @@ func TestAccS3BucketLifecycleConfiguration_ruleAbortIncompleteMultipartUpload(t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -728,12 +727,12 @@ func TestAccS3BucketLifecycleConfiguration_TransitionDate_standardIa(t *testing. resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketLifecycleConfigurationConfig_dateTransition(rName, date, s3.TransitionStorageClassStandardIa), + Config: testAccBucketLifecycleConfigurationConfig_dateTransition(rName, date, string(types.TransitionStorageClassStandardIa)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), ), @@ -760,12 +759,12 @@ func TestAccS3BucketLifecycleConfiguration_TransitionDate_intelligentTiering(t * resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketLifecycleConfigurationConfig_dateTransition(rName, date, s3.StorageClassIntelligentTiering), + Config: testAccBucketLifecycleConfigurationConfig_dateTransition(rName, date, string(types.StorageClassIntelligentTiering)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), ), @@ -787,18 +786,18 @@ func TestAccS3BucketLifecycleConfiguration_TransitionStorageClassOnly_intelligen resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketLifecycleConfigurationConfig_transitionStorageClassOnly(rName, s3.StorageClassIntelligentTiering), + Config: testAccBucketLifecycleConfigurationConfig_transitionStorageClassOnly(rName, string(types.StorageClassIntelligentTiering)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.0.transition.*", map[string]string{ "days": "0", "date": "", - "storage_class": s3.StorageClassIntelligentTiering, + "storage_class": string(types.StorageClassIntelligentTiering), }), ), }, @@ -819,12 +818,12 @@ func 
TestAccS3BucketLifecycleConfiguration_TransitionZeroDays_intelligentTiering resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketLifecycleConfigurationConfig_zeroDaysTransition(rName, s3.StorageClassIntelligentTiering), + Config: testAccBucketLifecycleConfigurationConfig_zeroDaysTransition(rName, string(types.StorageClassIntelligentTiering)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), ), @@ -848,18 +847,18 @@ func TestAccS3BucketLifecycleConfiguration_TransitionUpdateBetweenDaysAndDate_in resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketLifecycleConfigurationConfig_zeroDaysTransition(rName, s3.StorageClassIntelligentTiering), + Config: testAccBucketLifecycleConfigurationConfig_zeroDaysTransition(rName, string(types.StorageClassIntelligentTiering)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), ), }, { - Config: testAccBucketLifecycleConfigurationConfig_dateTransition(rName, date, s3.StorageClassIntelligentTiering), + Config: testAccBucketLifecycleConfigurationConfig_dateTransition(rName, date, string(types.StorageClassIntelligentTiering)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), ), @@ -870,7 +869,7 @@ func TestAccS3BucketLifecycleConfiguration_TransitionUpdateBetweenDaysAndDate_in ImportStateVerify: true, }, { - Config: testAccBucketLifecycleConfigurationConfig_zeroDaysTransition(rName, s3.StorageClassIntelligentTiering), + Config: testAccBucketLifecycleConfigurationConfig_zeroDaysTransition(rName, string(types.StorageClassIntelligentTiering)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), ), @@ -887,7 +886,7 @@ func TestAccS3BucketLifecycleConfiguration_EmptyFilter_NonCurrentVersions(t *tes resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -914,7 +913,7 @@ func TestAccS3BucketLifecycleConfiguration_migrate_noChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -957,7 +956,7 @@ func TestAccS3BucketLifecycleConfiguration_migrate_withChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: 
acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -1000,7 +999,7 @@ func TestAccS3BucketLifecycleConfiguration_Update_filterWithAndToFilterWithPrefi resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -1031,7 +1030,7 @@ func TestAccS3BucketLifecycleConfiguration_Update_filterWithAndToFilterWithPrefi func testAccCheckBucketLifecycleConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_lifecycle_configuration" { @@ -1043,19 +1042,9 @@ func testAccCheckBucketLifecycleConfigurationDestroy(ctx context.Context) resour return err } - input := &s3.GetBucketLifecycleConfigurationInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } + _, err = tfs3.FindLifecycleRules(ctx, conn, bucket, expectedBucketOwner) - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.GetBucketLifecycleConfigurationWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) { + if tfresource.NotFound(err) { continue } @@ -1063,9 +1052,7 @@ func testAccCheckBucketLifecycleConfigurationDestroy(ctx context.Context) resour return err } - if config, ok := output.(*s3.GetBucketLifecycleConfigurationOutput); ok && config != nil && len(config.Rules) != 0 { - return fmt.Errorf("S3 Lifecycle Configuration for bucket (%s) still exists", rs.Primary.ID) - } + return fmt.Errorf("S3 Bucket Lifecycle Configuration %s still exists", rs.Primary.ID) } return nil @@ -1079,38 +1066,16 @@ func testAccCheckBucketLifecycleConfigurationExists(ctx context.Context, n strin return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := tfs3.ParseResourceID(rs.Primary.ID) if err != nil { return err } - input := &s3.GetBucketLifecycleConfigurationInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.GetBucketLifecycleConfigurationWithContext(ctx, input) - }, tfs3.ErrCodeNoSuchLifecycleConfiguration) - - if err != nil { - return err - } - - if config, ok := output.(*s3.GetBucketLifecycleConfigurationOutput); !ok || config == nil { - return fmt.Errorf("S3 Bucket Replication Configuration for bucket (%s) not found", rs.Primary.ID) - } + _, err = tfs3.FindLifecycleRules(ctx, conn, bucket, expectedBucketOwner) - return nil + return err } } diff 
--git a/internal/service/s3/bucket_logging.go b/internal/service/s3/bucket_logging.go index 09c88b69cc2..914405be089 100644 --- a/internal/service/s3/bucket_logging.go +++ b/internal/service/s3/bucket_logging.go @@ -94,6 +94,39 @@ func ResourceBucketLogging() *schema.Resource { }, }, }, + "target_object_key_format": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partitioned_prefix": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partition_date_source": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PartitionDateSource](), + }, + }, + }, + ExactlyOneOf: []string{"target_object_key_format.0.partitioned_prefix", "target_object_key_format.0.simple_prefix"}, + }, + "simple_prefix": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"target_object_key_format.0.partitioned_prefix", "target_object_key_format.0.simple_prefix"}, + }, + }, + }, + }, "target_prefix": { Type: schema.TypeString, Required: true, @@ -125,6 +158,10 @@ func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me input.BucketLoggingStatus.LoggingEnabled.TargetGrants = expandBucketLoggingTargetGrants(v.(*schema.Set).List()) } + if v, ok := d.GetOk("target_object_key_format"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.BucketLoggingStatus.LoggingEnabled.TargetObjectKeyFormat = expandTargetObjectKeyFormat(v.([]interface{})[0].(map[string]interface{})) + } + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketLogging(ctx, input) }, errCodeNoSuchBucket) @@ -173,6 +210,13 @@ func resourceBucketLoggingRead(ctx context.Context, d *schema.ResourceData, meta if err := d.Set("target_grant", flattenBucketLoggingTargetGrants(loggingEnabled.TargetGrants)); err != nil { return sdkdiag.AppendErrorf(diags, "setting target_grant: %s", err) } + if loggingEnabled.TargetObjectKeyFormat != nil { + if err := d.Set("target_object_key_format", []interface{}{flattenTargetObjectKeyFormat(loggingEnabled.TargetObjectKeyFormat)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_object_key_format: %s", err) + } + } else { + d.Set("target_object_key_format", nil) + } d.Set("target_prefix", loggingEnabled.TargetPrefix) return diags @@ -204,6 +248,10 @@ func resourceBucketLoggingUpdate(ctx context.Context, d *schema.ResourceData, me input.BucketLoggingStatus.LoggingEnabled.TargetGrants = expandBucketLoggingTargetGrants(v.(*schema.Set).List()) } + if v, ok := d.GetOk("target_object_key_format"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.BucketLoggingStatus.LoggingEnabled.TargetObjectKeyFormat = expandTargetObjectKeyFormat(v.([]interface{})[0].(map[string]interface{})) + } + _, err = conn.PutBucketLogging(ctx, input) if err != nil { @@ -378,3 +426,65 @@ func flattenBucketLoggingTargetGrantGrantee(g *types.Grantee) []interface{} { return []interface{}{m} } + +func expandTargetObjectKeyFormat(tfMap map[string]interface{}) *types.TargetObjectKeyFormat { + if tfMap == nil { + return nil + } + + apiObject := &types.TargetObjectKeyFormat{} + + if v, ok := tfMap["partitioned_prefix"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.PartitionedPrefix = 
expandPartitionedPrefix(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["simple_prefix"]; ok && len(v.([]interface{})) > 0 { + apiObject.SimplePrefix = &types.SimplePrefix{} + } + + return apiObject +} + +func expandPartitionedPrefix(tfMap map[string]interface{}) *types.PartitionedPrefix { + if tfMap == nil { + return nil + } + + apiObject := &types.PartitionedPrefix{} + + if v, ok := tfMap["partition_date_source"].(string); ok && v != "" { + apiObject.PartitionDateSource = types.PartitionDateSource(v) + } + + return apiObject +} + +func flattenTargetObjectKeyFormat(apiObject *types.TargetObjectKeyFormat) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.PartitionedPrefix; v != nil { + tfMap["partitioned_prefix"] = []interface{}{flattenPartitionedPrefix(v)} + } + + if apiObject.SimplePrefix != nil { + tfMap["simple_prefix"] = make([]map[string]interface{}, 1) + } + + return tfMap +} + +func flattenPartitionedPrefix(apiObject *types.PartitionedPrefix) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "partition_date_source": apiObject.PartitionDateSource, + } + + return tfMap +} diff --git a/internal/service/s3/bucket_logging_test.go b/internal/service/s3/bucket_logging_test.go index c255f6dd772..54b9de7920c 100644 --- a/internal/service/s3/bucket_logging_test.go +++ b/internal/service/s3/bucket_logging_test.go @@ -6,7 +6,6 @@ package s3_test import ( "context" "fmt" - "os" "testing" "github.com/aws/aws-sdk-go-v2/service/s3/types" @@ -33,13 +32,14 @@ func TestAccS3BucketLogging_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccBucketLoggingConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketLoggingExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "bucket", rName), resource.TestCheckResourceAttr(resourceName, "expected_bucket_owner", ""), resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "bucket"), - resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), ), }, { @@ -98,8 +98,9 @@ func TestAccS3BucketLogging_update(t *testing.T) { testAccCheckBucketLoggingExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "bucket", rName), resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "bucket"), - resource.TestCheckResourceAttr(resourceName, "target_prefix", "tmp/"), resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "tmp/"), ), }, { @@ -172,12 +173,7 @@ func TestAccS3BucketLogging_TargetGrantByID(t *testing.T) { func TestAccS3BucketLogging_TargetGrantByEmail(t *testing.T) { ctx := acctest.Context(t) - rEmail, ok := os.LookupEnv("AWS_S3_BUCKET_LOGGING_AMAZON_CUSTOMER_BY_EMAIL") - - if !ok { - acctest.Skip(t, "'AWS_S3_BUCKET_LOGGING_AMAZON_CUSTOMER_BY_EMAIL' not set, skipping test.") - } - + rEmail := acctest.SkipIfEnvVarNotSet(t, "AWS_S3_BUCKET_LOGGING_AMAZON_CUSTOMER_BY_EMAIL") rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
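For context on the `target_object_key_format` block exercised by the tests that follow: exactly one of `partitioned_prefix` or `simple_prefix` may be set, and per the AWS server-access-logging documentation a partitioned prefix delivers log objects under `[DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/...`, with the date taken from either the event time or the delivery time according to `partition_date_source`. A small sketch of that layout (illustrative only, not provider code):

```go
package sketch

import "fmt"

// partitionedLogKey assembles a log object key in the documented
// partitioned-prefix layout; with simple_prefix, objects land directly
// under the destination prefix instead.
// e.g. partitionedLogKey("log/", "123456789012", "us-east-1",
//      "my-source-bucket", "2023/11/26", "2023-11-26-21-32-16-EXAMPLE")
func partitionedLogKey(prefix, accountID, region, bucket, yyyyMMdd, object string) string {
	return fmt.Sprintf("%s%s/%s/%s/%s/%s", prefix, accountID, region, bucket, yyyyMMdd, object)
}
```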
resourceName := "aws_s3_bucket_logging.test" @@ -318,6 +314,7 @@ func TestAccS3BucketLogging_migrate_loggingNoChange(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckBucketLoggingExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "0"), resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), ), }, @@ -351,6 +348,7 @@ func TestAccS3BucketLogging_migrate_loggingWithChange(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckBucketLoggingExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "0"), resource.TestCheckResourceAttr(resourceName, "target_prefix", "tmp/"), ), }, @@ -376,8 +374,9 @@ func TestAccS3BucketLogging_withExpectedBucketOwner(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "bucket", rName), acctest.CheckResourceAttrAccountID(resourceName, "expected_bucket_owner"), resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "bucket"), - resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), ), }, { @@ -389,6 +388,62 @@ func TestAccS3BucketLogging_withExpectedBucketOwner(t *testing.T) { }) } +func TestAccS3BucketLogging_withTargetObjectKeyFormat(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingConfig_withTargetObjectKeyFormatPartitionedPrefix(rName, "EventTime"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.partitioned_prefix.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.partitioned_prefix.0.partition_date_source", "EventTime"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.simple_prefix.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingConfig_withTargetObjectKeyFormatPartitionedPrefix(rName, "DeliveryTime"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.partitioned_prefix.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.partitioned_prefix.0.partition_date_source", "DeliveryTime"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.simple_prefix.#", "0"), + ), + }, + { + Config: 
testAccBucketLoggingConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "0"), + ), + }, + { + Config: testAccBucketLoggingConfig_withTargetObjectKeyFormatSimplePrefix(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.partitioned_prefix.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_object_key_format.0.simple_prefix.#", "1"), + ), + }, + }, + }) +} + func testAccCheckBucketLoggingDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -586,3 +641,39 @@ resource "aws_s3_bucket_logging" "test" { } `) } + +func testAccBucketLoggingConfig_withTargetObjectKeyFormatPartitionedPrefix(rName, partitionDateSource string) string { + return acctest.ConfigCompose(testAccBucketLoggingConfig_base(rName), fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + + target_object_key_format { + partitioned_prefix { + partition_date_source = %[1]q + } + } +} +`, partitionDateSource)) +} + +func testAccBucketLoggingConfig_withTargetObjectKeyFormatSimplePrefix(rName string) string { + return acctest.ConfigCompose(testAccBucketLoggingConfig_base(rName), ` +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + + target_object_key_format { + simple_prefix {} + } +} +`) +} diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 9a47ddd6a2b..e6c0f8e4e9d 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -7,14 +7,16 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -28,6 +30,7 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketReplicationConfigurationRead, UpdateWithoutTimeout: resourceBucketReplicationConfigurationUpdate, DeleteWithoutTimeout: 
resourceBucketReplicationConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -44,11 +47,6 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, - "token": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, "rule": { Type: schema.TypeList, Required: true, @@ -62,9 +60,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.DeleteMarkerReplicationStatus](), }, }, }, @@ -82,9 +80,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "owner": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.OwnerOverride](), }, }, }, @@ -137,9 +135,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.MetricsStatus](), }, }, }, @@ -151,9 +149,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ReplicationTimeStatus](), }, "time": { Type: schema.TypeList, @@ -176,9 +174,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, "storage_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.StorageClass](), }, }, }, @@ -190,9 +188,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ExistingObjectReplicationStatus](), }, }, }, @@ -272,9 +270,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ReplicaModificationsStatus](), }, }, }, @@ -286,9 +284,9 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: 
enum.Validate[types.SseKmsEncryptedObjectsStatus](), }, }, }, @@ -297,31 +295,33 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ReplicationRuleStatus](), }, }, }, }, + "token": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, }, } } func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) - - rc := &s3.ReplicationConfiguration{ - Role: aws.String(d.Get("role").(string)), - Rules: ExpandReplicationRules(ctx, d.Get("rule").([]interface{})), - } - input := &s3.PutBucketReplicationInput{ - Bucket: aws.String(bucket), - ReplicationConfiguration: rc, + Bucket: aws.String(bucket), + ReplicationConfiguration: &types.ReplicationConfiguration{ + Role: aws.String(d.Get("role").(string)), + Rules: expandReplicationRules(ctx, d.Get("rule").([]interface{})), + }, } if v, ok := d.GetOk("token"); ok { @@ -329,32 +329,35 @@ func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema } err := retry.RetryContext(ctx, s3BucketPropagationTimeout, func() *retry.RetryError { - _, err := conn.PutBucketReplicationWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + _, err := conn.PutBucketReplication(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, errCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { return retry.RetryableError(err) } + if err != nil { return retry.NonRetryableError(err) } + return nil }) if tfresource.TimedOut(err) { - _, err = conn.PutBucketReplicationWithContext(ctx, input) + _, err = conn.PutBucketReplication(ctx, input) } if err != nil { - return sdkdiag.AppendErrorf(diags, "creating S3 replication configuration for bucket (%s): %s", bucket, err) + return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Replication Configuration: %s", bucket, err) } d.SetId(bucket) _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { - return FindBucketReplicationConfigurationByID(ctx, conn, d.Id()) + return findReplicationConfiguration(ctx, conn, d.Id()) }) if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for S3 Replication creation on bucket (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket Replication Configuration (%s) create: %s", d.Id(), err) } return append(diags, resourceBucketReplicationConfigurationRead(ctx, d, meta)...) 
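The create path above retries `PutBucketReplication` because two failures are transient by design: the bucket itself may not be visible yet (`NoSuchBucket`), and replication cannot be configured until versioning has finished enabling (`InvalidRequest`). With the move to AWS SDK for Go v2, the generated v1 error constants such as `s3.ErrCodeNoSuchBucket` no longer exist, so the provider matches string codes (`errCodeNoSuchBucket`, `errCodeInvalidRequest`) via `tfawserr`. A minimal sketch of the same matching done directly against the `smithy.APIError` interface — the helper name is illustrative, and the import path follows the mirror notation used throughout this diff:

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/aws/smithy-go"
)

// isRetryablePutReplicationError (illustrative name) reports whether a
// PutBucketReplication error is worth retrying while bucket state
// propagates: either the bucket is not visible yet, or versioning has
// not finished enabling.
func isRetryablePutReplicationError(err error) bool {
	var apiErr smithy.APIError
	if !errors.As(err, &apiErr) {
		return false
	}
	switch apiErr.ErrorCode() {
	case "NoSuchBucket":
		return true
	case "InvalidRequest":
		return strings.Contains(apiErr.ErrorMessage(), "Versioning must be 'Enabled' on the bucket")
	}
	return false
}

func main() {
	var err error // e.g. returned by conn.PutBucketReplication(ctx, input)
	fmt.Println(isRetryablePutReplicationError(err)) // false for a nil error
}
```

`tfawserr.ErrCodeEquals` and `tfawserr.ErrMessageContains` wrap exactly this `errors.As` unwrapping, which is why the call sites above need only the string code.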
@@ -362,9 +365,9 @@ func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema func resourceBucketReplicationConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) - output, err := FindBucketReplicationConfigurationByID(ctx, conn, d.Id()) + rc, err := findReplicationConfiguration(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Replication Configuration (%s) not found, removing from state", d.Id()) @@ -373,14 +376,12 @@ func resourceBucketReplicationConfigurationRead(ctx context.Context, d *schema.R } if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket Replication Configuration for bucket (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading S3 Bucket Replication Configuration (%s): %s", d.Id(), err) } - r := output.ReplicationConfiguration - d.Set("bucket", d.Id()) - d.Set("role", r.Role) - if err := d.Set("rule", FlattenReplicationRules(ctx, r.Rules)); err != nil { + d.Set("role", rc.Role) + if err := d.Set("rule", flattenReplicationRules(ctx, rc.Rules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting rule: %s", err) } @@ -389,39 +390,24 @@ func resourceBucketReplicationConfigurationRead(ctx context.Context, d *schema.R func resourceBucketReplicationConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) - - rc := &s3.ReplicationConfiguration{ - Role: aws.String(d.Get("role").(string)), - Rules: ExpandReplicationRules(ctx, d.Get("rule").([]interface{})), - } + conn := meta.(*conns.AWSClient).S3Client(ctx) input := &s3.PutBucketReplicationInput{ - Bucket: aws.String(d.Id()), - ReplicationConfiguration: rc, + Bucket: aws.String(d.Id()), + ReplicationConfiguration: &types.ReplicationConfiguration{ + Role: aws.String(d.Get("role").(string)), + Rules: expandReplicationRules(ctx, d.Get("rule").([]interface{})), + }, } if v, ok := d.GetOk("token"); ok { input.Token = aws.String(v.(string)) } - err := retry.RetryContext(ctx, s3BucketPropagationTimeout, func() *retry.RetryError { - _, err := conn.PutBucketReplicationWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } - return nil - }) - - if tfresource.TimedOut(err) { - _, err = conn.PutBucketReplicationWithContext(ctx, input) - } + _, err := conn.PutBucketReplication(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 replication configuration for bucket (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating S3 Bucket Replication Configuration (%s): %s", d.Id(), err) } return append(diags, resourceBucketReplicationConfigurationRead(ctx, d, meta)...) 
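Beyond the lifecycle wiring, the bulk of the additions below are expanders and flatteners that translate between Terraform's `[]interface{}` configuration representation and the v2 API types. The most notable shape change is `rule.filter`: in SDK v2, `types.ReplicationRuleFilter` is a tagged union whose variants (`ReplicationRuleFilterMemberPrefix`, `...MemberTag`, `...MemberAnd`) wrap their values, and `flattenReplicationRuleFilter` recovers the variant with a type switch. A minimal sketch of that pattern, with an illustrative `describeFilter` helper:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeFilter recovers the concrete variant of the
// types.ReplicationRuleFilter union with a type switch, mirroring
// flattenReplicationRuleFilter below.
func describeFilter(f types.ReplicationRuleFilter) string {
	switch v := f.(type) {
	case *types.ReplicationRuleFilterMemberPrefix:
		return fmt.Sprintf("prefix %q", v.Value)
	case *types.ReplicationRuleFilterMemberTag:
		return fmt.Sprintf("tag %s=%s", aws.ToString(v.Value.Key), aws.ToString(v.Value.Value))
	case *types.ReplicationRuleFilterMemberAnd:
		return fmt.Sprintf("and: prefix %q with %d tags", aws.ToString(v.Value.Prefix), len(v.Value.Tags))
	default:
		return "unknown filter variant"
	}
}

func main() {
	// An empty `filter {}` block expands to an empty prefix member,
	// so the API still receives exactly one of Prefix, Tag, or And.
	var f types.ReplicationRuleFilter = &types.ReplicationRuleFilterMemberPrefix{}
	fmt.Println(describeFilter(f)) // prefix ""
}
```

Sending an empty `ReplicationRuleFilterMemberPrefix{}` for a bare `filter {}` block keeps the request valid, since specifying more than one (or none) of Prefix, Tag, and And yields a `MalformedXML` error — see the comment in `expandReplicationRuleFilter` below.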
@@ -429,36 +415,43 @@ func resourceBucketReplicationConfigurationUpdate(ctx context.Context, d *schema func resourceBucketReplicationConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) - input := &s3.DeleteBucketReplicationInput{ + log.Printf("[DEBUG] Deleting S3 Bucket Replication Configuration: %s", d.Id()) + _, err := conn.DeleteBucketReplication(ctx, &s3.DeleteBucketReplicationInput{ Bucket: aws.String(d.Id()), - } - - _, err := conn.DeleteBucketReplicationWithContext(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeReplicationConfigurationNotFound) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting S3 bucket replication configuration for bucket (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Replication Configuration (%s): %s", d.Id(), err) + } + + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findReplicationConfiguration(ctx, conn, d.Id()) + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket Replication Configuration (%s) delete: %s", d.Id(), err) } return diags } -func FindBucketReplicationConfigurationByID(ctx context.Context, conn *s3.S3, id string) (*s3.GetBucketReplicationOutput, error) { - in := &s3.GetBucketReplicationInput{ - Bucket: aws.String(id), +func findReplicationConfiguration(ctx context.Context, conn *s3.Client, bucket string) (*types.ReplicationConfiguration, error) { + input := &s3.GetBucketReplicationInput{ + Bucket: aws.String(bucket), } - out, err := conn.GetBucketReplicationWithContext(ctx, in) + output, err := conn.GetBucketReplication(ctx, input) - if tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeReplicationConfigurationNotFound) { return nil, &retry.NotFoundError{ LastError: err, - LastRequest: in, + LastRequest: input, } } @@ -466,9 +459,678 @@ func FindBucketReplicationConfigurationByID(ctx context.Context, conn *s3.S3, id return nil, err } - if out == nil || out.ReplicationConfiguration == nil { - return nil, tfresource.NewEmptyResultError(in) + if output == nil || output.ReplicationConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ReplicationConfiguration, nil +} + +func expandReplicationRules(ctx context.Context, l []interface{}) []types.ReplicationRule { + var rules []types.ReplicationRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + rule := types.ReplicationRule{} + + if v, ok := tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.DeleteMarkerReplication = expandReplicationRuleDeleteMarkerReplication(v) + } + + if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.Destination = expandReplicationRuleDestination(v) + } + + if v, ok := tfMap["existing_object_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.ExistingObjectReplication = expandReplicationRuleExistingObjectReplication(v) + } + + if v, ok := tfMap["id"].(string); ok && v != "" { + rule.ID = aws.String(v) + } + + if v, ok := 
tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.SourceSelectionCriteria = expandReplicationRuleSourceSelectionCriteria(v) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + rule.Status = types.ReplicationRuleStatus(v) + } + + // Support the empty filter block in terraform i.e. 'filter {}', + // which implies the replication rule does not require a specific filter, + // by expanding the "filter" array even if the first element is nil. + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { + // XML schema V2 + rule.Filter = expandReplicationRuleFilter(ctx, v) + rule.Priority = aws.Int32(int32(tfMap["priority"].(int))) + } else { + // XML schema V1 + rule.Prefix = aws.String(tfMap["prefix"].(string)) + } + + rules = append(rules, rule) + } + + return rules +} + +func expandReplicationRuleDeleteMarkerReplication(l []interface{}) *types.DeleteMarkerReplication { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.DeleteMarkerReplication{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.DeleteMarkerReplicationStatus(v) + } + + return result +} + +func expandReplicationRuleDestination(l []interface{}) *types.Destination { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.Destination{} + + if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.AccessControlTranslation = expandReplicationRuleDestinationAccessControlTranslation(v) + } + + if v, ok := tfMap["account"].(string); ok && v != "" { + result.Account = aws.String(v) + } + + if v, ok := tfMap["bucket"].(string); ok && v != "" { + result.Bucket = aws.String(v) + } + + if v, ok := tfMap["encryption_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.EncryptionConfiguration = expandReplicationRuleDestinationEncryptionConfiguration(v) + } + + if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Metrics = expandReplicationRuleDestinationMetrics(v) + } + + if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.ReplicationTime = expandReplicationRuleDestinationReplicationTime(v) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + result.StorageClass = types.StorageClass(v) + } + + return result +} + +func expandReplicationRuleDestinationAccessControlTranslation(l []interface{}) *types.AccessControlTranslation { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.AccessControlTranslation{} + + if v, ok := tfMap["owner"].(string); ok && v != "" { + result.Owner = types.OwnerOverride(v) + } + + return result +} + +func expandReplicationRuleDestinationEncryptionConfiguration(l []interface{}) *types.EncryptionConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.EncryptionConfiguration{} + + if v, ok := tfMap["replica_kms_key_id"].(string); ok && v != "" { + result.ReplicaKmsKeyID = aws.String(v) + } + + return result +} + +func expandReplicationRuleDestinationMetrics(l []interface{}) *types.Metrics { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := 
l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.Metrics{} + + if v, ok := tfMap["event_threshold"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.EventThreshold = expandReplicationRuleDestinationReplicationTimeValue(v) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.MetricsStatus(v) + } + + return result +} + +func expandReplicationRuleDestinationReplicationTime(l []interface{}) *types.ReplicationTime { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.ReplicationTime{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.ReplicationTimeStatus(v) + } + + if v, ok := tfMap["time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Time = expandReplicationRuleDestinationReplicationTimeValue(v) + } + + return result +} + +func expandReplicationRuleDestinationReplicationTimeValue(l []interface{}) *types.ReplicationTimeValue { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.ReplicationTimeValue{} + + if v, ok := tfMap["minutes"].(int); ok { + result.Minutes = aws.Int32(int32(v)) + } + + return result +} + +func expandReplicationRuleExistingObjectReplication(l []interface{}) *types.ExistingObjectReplication { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.ExistingObjectReplication{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.ExistingObjectReplicationStatus(v) + } + + return result +} + +func expandReplicationRuleSourceSelectionCriteria(l []interface{}) *types.SourceSelectionCriteria { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.SourceSelectionCriteria{} + + if v, ok := tfMap["replica_modifications"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.ReplicaModifications = expandSourceSelectionCriteriaReplicaModifications(v) + } + + if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.SseKmsEncryptedObjects = expandSourceSelectionCriteriaSSEKMSEncryptedObjects(v) + } + + return result +} + +func expandSourceSelectionCriteriaReplicaModifications(l []interface{}) *types.ReplicaModifications { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.ReplicaModifications{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.ReplicaModificationsStatus(v) + } + + return result +} + +func expandSourceSelectionCriteriaSSEKMSEncryptedObjects(l []interface{}) *types.SseKmsEncryptedObjects { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.SseKmsEncryptedObjects{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = types.SseKmsEncryptedObjectsStatus(v) + } + + return result +} + +func expandReplicationRuleFilter(ctx context.Context, l []interface{}) types.ReplicationRuleFilter { + if len(l) == 0 || l[0] == nil { + return &types.ReplicationRuleFilterMemberPrefix{} + } + + tfMap := l[0].(map[string]interface{}) + var result 
types.ReplicationRuleFilter + + if v, ok := tfMap["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result = expandReplicationRuleFilterMemberAnd(ctx, v) + } + + if v, ok := tfMap["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result = expandReplicationRuleFilterMemberTag(v) + } + + // Per AWS S3 API, "A Filter must have exactly one of Prefix, Tag, or And specified"; + // Specifying more than one of the listed parameters results in a MalformedXML error. + // If a filter is specified as filter { prefix = "" } in Terraform, we should send the prefix value + // in the API request even if it is an empty value, else Terraform will report non-empty plans. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23487 + if v, ok := tfMap["prefix"].(string); ok && result == nil { + result = &types.ReplicationRuleFilterMemberPrefix{ + Value: v, + } + } + + return result +} + +func expandReplicationRuleFilterMemberAnd(ctx context.Context, l []interface{}) *types.ReplicationRuleFilterMemberAnd { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.ReplicationRuleFilterMemberAnd{ + Value: types.ReplicationRuleAndOperator{}, + } + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + result.Value.Prefix = aws.String(v) + } + + if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { + tags := tagsV2(tftags.New(ctx, v).IgnoreAWS()) + if len(tags) > 0 { + result.Value.Tags = tags + } + } + + return result +} + +func expandReplicationRuleFilterMemberTag(l []interface{}) *types.ReplicationRuleFilterMemberTag { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &types.ReplicationRuleFilterMemberTag{ + Value: types.Tag{}, + } + + if v, ok := tfMap["key"].(string); ok && v != "" { + result.Value.Key = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + result.Value.Value = aws.String(v) + } + + return result +} + +func flattenReplicationRules(ctx context.Context, rules []types.ReplicationRule) []interface{} { + if len(rules) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, rule := range rules { + m := map[string]interface{}{ + "priority": rule.Priority, + "status": rule.Status, + } + + if rule.DeleteMarkerReplication != nil { + m["delete_marker_replication"] = flattenReplicationRuleDeleteMarkerReplication(rule.DeleteMarkerReplication) + } + + if rule.Destination != nil { + m["destination"] = flattenReplicationRuleDestination(rule.Destination) + } + + if rule.ExistingObjectReplication != nil { + m["existing_object_replication"] = flattenReplicationRuleExistingObjectReplication(rule.ExistingObjectReplication) + } + + if rule.Filter != nil { + m["filter"] = flattenReplicationRuleFilter(ctx, rule.Filter) + } + + if rule.ID != nil { + m["id"] = aws.ToString(rule.ID) + } + + if rule.Prefix != nil { + m["prefix"] = aws.ToString(rule.Prefix) + } + + if rule.SourceSelectionCriteria != nil { + m["source_selection_criteria"] = flattenReplicationRuleSourceSelectionCriteria(rule.SourceSelectionCriteria) + } + + results = append(results, m) + } + + return results +} + +func flattenReplicationRuleDeleteMarkerReplication(dmr *types.DeleteMarkerReplication) []interface{} { + if dmr == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "status": dmr.Status, + } + + return []interface{}{m} +} + +func 
flattenReplicationRuleDestination(dest *types.Destination) []interface{} { + if dest == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "storage_class": dest.StorageClass, + } + + if dest.AccessControlTranslation != nil { + m["access_control_translation"] = flattenReplicationRuleDestinationAccessControlTranslation(dest.AccessControlTranslation) + } + + if dest.Account != nil { + m["account"] = aws.ToString(dest.Account) + } + + if dest.Bucket != nil { + m["bucket"] = aws.ToString(dest.Bucket) + } + + if dest.EncryptionConfiguration != nil { + m["encryption_configuration"] = flattenReplicationRuleDestinationEncryptionConfiguration(dest.EncryptionConfiguration) + } + + if dest.Metrics != nil { + m["metrics"] = flattenReplicationRuleDestinationMetrics(dest.Metrics) + } + + if dest.ReplicationTime != nil { + m["replication_time"] = flattenReplicationRuleDestinationReplicationTime(dest.ReplicationTime) + } + + return []interface{}{m} +} + +func flattenReplicationRuleDestinationAccessControlTranslation(act *types.AccessControlTranslation) []interface{} { + if act == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "owner": act.Owner, + } + + return []interface{}{m} +} + +func flattenReplicationRuleDestinationEncryptionConfiguration(ec *types.EncryptionConfiguration) []interface{} { + if ec == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if ec.ReplicaKmsKeyID != nil { + m["replica_kms_key_id"] = aws.ToString(ec.ReplicaKmsKeyID) + } + + return []interface{}{m} +} + +func flattenReplicationRuleDestinationMetrics(metrics *types.Metrics) []interface{} { + if metrics == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "status": metrics.Status, + } + + if metrics.EventThreshold != nil { + m["event_threshold"] = flattenReplicationRuleDestinationReplicationTimeValue(metrics.EventThreshold) + } + + return []interface{}{m} +} + +func flattenReplicationRuleDestinationReplicationTimeValue(rtv *types.ReplicationTimeValue) []interface{} { + if rtv == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "minutes": rtv.Minutes, + } + + return []interface{}{m} +} + +func flattenReplicationRuleDestinationReplicationTime(rt *types.ReplicationTime) []interface{} { + if rt == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "status": rt.Status, + } + + if rt.Time != nil { + m["time"] = flattenReplicationRuleDestinationReplicationTimeValue(rt.Time) + } + + return []interface{}{m} +} + +func flattenReplicationRuleExistingObjectReplication(eor *types.ExistingObjectReplication) []interface{} { + if eor == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "status": eor.Status, + } + + return []interface{}{m} +} + +func flattenReplicationRuleFilter(ctx context.Context, filter types.ReplicationRuleFilter) []interface{} { + if filter == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + switch v := filter.(type) { + case *types.ReplicationRuleFilterMemberAnd: + m["and"] = flattenReplicationRuleFilterMemberAnd(ctx, v) + case *types.ReplicationRuleFilterMemberPrefix: + m["prefix"] = v.Value + case *types.ReplicationRuleFilterMemberTag: + m["tag"] = flattenReplicationRuleFilterMemberTag(v) + default: + return nil + } + + return []interface{}{m} +} + +func flattenReplicationRuleFilterMemberAnd(ctx context.Context, op *types.ReplicationRuleFilterMemberAnd) []interface{} { + if op == nil { + return []interface{}{} + } + + m := 
make(map[string]interface{}) + + if v := op.Value.Prefix; v != nil { + m["prefix"] = aws.ToString(v) + } + + if v := op.Value.Tags; v != nil { + m["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() + } + + return []interface{}{m} +} + +func flattenReplicationRuleFilterMemberTag(op *types.ReplicationRuleFilterMemberTag) []interface{} { + if op == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if v := op.Value.Key; v != nil { + m["key"] = aws.ToString(v) + } + + if v := op.Value.Value; v != nil { + m["value"] = aws.ToString(v) + } + + return []interface{}{m} +} + +func flattenReplicationRuleSourceSelectionCriteria(ssc *types.SourceSelectionCriteria) []interface{} { + if ssc == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if ssc.ReplicaModifications != nil { + m["replica_modifications"] = flattenSourceSelectionCriteriaReplicaModifications(ssc.ReplicaModifications) + } + + if ssc.SseKmsEncryptedObjects != nil { + m["sse_kms_encrypted_objects"] = flattenSourceSelectionCriteriaSSEKMSEncryptedObjects(ssc.SseKmsEncryptedObjects) + } + + return []interface{}{m} +} + +func flattenSourceSelectionCriteriaReplicaModifications(rc *types.ReplicaModifications) []interface{} { + if rc == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "status": rc.Status, + } + + return []interface{}{m} +} + +func flattenSourceSelectionCriteriaSSEKMSEncryptedObjects(objects *types.SseKmsEncryptedObjects) []interface{} { + if objects == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "status": objects.Status, } - return out, nil + return []interface{}{m} } diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index b40625178cd..d68c33e7300 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -7,11 +7,8 @@ import ( "context" "fmt" "testing" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -20,6 +17,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { @@ -37,12 +35,12 @@ func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ { - Config: testAccBucketReplicationConfigurationConfig_basic(rName, s3.StorageClassStandard), + Config: testAccBucketReplicationConfigurationConfig_basic(rName, string(types.StorageClassStandard)), 
Check: resource.ComposeTestCheckFunc( testAccCheckBucketReplicationConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), @@ -50,15 +48,15 @@ func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccBucketReplicationConfigurationConfig_basic(rName, s3.StorageClassGlacier), + Config: testAccBucketReplicationConfigurationConfig_basic(rName, string(types.StorageClassGlacier)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketReplicationConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), @@ -66,9 +64,9 @@ func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", - "destination.0.storage_class": s3.StorageClassGlacier, + "destination.0.storage_class": string(types.StorageClassGlacier), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), @@ -82,13 +80,13 @@ func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", "destination.0.encryption_configuration.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), "source_selection_criteria.#": "1", "source_selection_criteria.0.sse_kms_encrypted_objects.#": "1", - "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": s3.SseKmsEncryptedObjectsStatusEnabled, + "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": string(types.SseKmsEncryptedObjectsStatusEnabled), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.encryption_configuration.0.replica_kms_key_id", kmsKeyResourceName, "arn"), @@ -107,12 +105,12 @@ func TestAccS3BucketReplicationConfiguration_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), CheckDestroy: nil, Steps: []resource.TestStep{ { - Config: testAccBucketReplicationConfigurationConfig_basic(rName, s3.StorageClassStandard), + Config: testAccBucketReplicationConfigurationConfig_basic(rName, string(types.StorageClassStandard)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketReplicationConfigurationExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, 
acctest.Provider, tfs3.ResourceBucketReplicationConfiguration(), resourceName), @@ -136,7 +134,7 @@ func TestAccS3BucketReplicationConfiguration_multipleDestinationsEmptyFilter(t * acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -148,29 +146,29 @@ func TestAccS3BucketReplicationConfiguration_multipleDestinationsEmptyFilter(t * resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandardIa, + "destination.0.storage_class": string(types.StorageClassStandardIa), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule3", "priority": "3", - "status": s3.ReplicationRuleStatusDisabled, + "status": string(types.ReplicationRuleStatusDisabled), "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassOnezoneIa, + "destination.0.storage_class": string(types.StorageClassOnezoneIa), }), ), }, @@ -196,7 +194,7 @@ func TestAccS3BucketReplicationConfiguration_multipleDestinationsNonEmptyFilter( acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -208,34 +206,34 @@ func TestAccS3BucketReplicationConfiguration_multipleDestinationsNonEmptyFilter( resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "filter.#": "1", "filter.0.tag.#": "1", "filter.0.tag.0.key": "Key2", "filter.0.tag.0.value": "Value2", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandardIa, + "destination.0.storage_class": string(types.StorageClassStandardIa), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": 
"rule3", "priority": "3", - "status": s3.ReplicationRuleStatusDisabled, + "status": string(types.ReplicationRuleStatusDisabled), "filter.#": "1", "filter.0.and.#": "1", "filter.0.and.0.prefix": "prefix3", "filter.0.and.0.tags.%": "1", "filter.0.and.0.tags.Key3": "Value3", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassOnezoneIa, + "destination.0.storage_class": string(types.StorageClassOnezoneIa), }), ), }, @@ -263,7 +261,7 @@ func TestAccS3BucketReplicationConfiguration_twoDestination(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -275,20 +273,20 @@ func TestAccS3BucketReplicationConfiguration_twoDestination(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandardIa, + "destination.0.storage_class": string(types.StorageClassStandardIa), }), ), }, @@ -318,7 +316,7 @@ func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAccessC acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -331,11 +329,11 @@ func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAccessC resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", "destination.0.access_control_translation.#": "1", - "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.access_control_translation.0.owner": string(types.OwnerOverrideDestination), + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -355,15 +353,15 @@ func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAccessC resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": 
"foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", "destination.0.access_control_translation.#": "1", - "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.access_control_translation.0.owner": string(types.OwnerOverrideDestination), "destination.0.encryption_configuration.#": "1", "source_selection_criteria.#": "1", "source_selection_criteria.0.sse_kms_encrypted_objects.#": "1", - "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": s3.SseKmsEncryptedObjectsStatusEnabled, - "destination.0.storage_class": s3.StorageClassStandard, + "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": string(types.SseKmsEncryptedObjectsStatusEnabled), + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -396,7 +394,7 @@ func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAddAcce acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -409,9 +407,9 @@ func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAddAcce resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -431,11 +429,11 @@ func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAddAcce resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", "destination.0.access_control_translation.#": "1", - "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.access_control_translation.0.owner": string(types.OwnerOverrideDestination), + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -465,7 +463,7 @@ func TestAccS3BucketReplicationConfiguration_replicationTimeControl(t *testing.T acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: 
acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -479,16 +477,16 @@ func TestAccS3BucketReplicationConfiguration_replicationTimeControl(t *testing.T "id": "foobar", "filter.#": "1", "filter.0.prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusEnabled), "destination.#": "1", "destination.0.replication_time.#": "1", - "destination.0.replication_time.0.status": s3.ReplicationTimeStatusEnabled, + "destination.0.replication_time.0.status": string(types.ReplicationTimeStatusEnabled), "destination.0.replication_time.0.time.#": "1", "destination.0.replication_time.0.time.0.minutes": "15", "destination.0.metrics.#": "1", - "destination.0.metrics.0.status": s3.MetricsStatusEnabled, + "destination.0.metrics.0.status": string(types.MetricsStatusEnabled), "destination.0.metrics.0.event_threshold.#": "1", "destination.0.metrics.0.event_threshold.0.minutes": "15", }), @@ -519,7 +517,7 @@ func TestAccS3BucketReplicationConfiguration_replicaModifications(t *testing.T) acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -534,11 +532,11 @@ func TestAccS3BucketReplicationConfiguration_replicaModifications(t *testing.T) "filter.#": "1", "filter.0.prefix": "foo", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusEnabled), "source_selection_criteria.#": "1", "source_selection_criteria.0.replica_modifications.#": "1", - "source_selection_criteria.0.replica_modifications.0.status": s3.ReplicaModificationsStatusEnabled, - "status": s3.ReplicationRuleStatusEnabled, + "source_selection_criteria.0.replica_modifications.0.status": string(types.ReplicaModificationsStatusEnabled), + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -568,7 +566,7 @@ func TestAccS3BucketReplicationConfiguration_withoutId(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -580,7 +578,7 @@ func TestAccS3BucketReplicationConfiguration_withoutId(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "rule.0.id"), 
resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "foo"), - resource.TestCheckResourceAttr(resourceName, "rule.0.status", s3.ReplicationRuleStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", string(types.ReplicationRuleStatusEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.0.destination.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "rule.0.destination.0.bucket", dstBucketResourceName, "arn"), ), @@ -610,7 +608,7 @@ func TestAccS3BucketReplicationConfiguration_withoutStorageClass(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -623,7 +621,7 @@ func TestAccS3BucketReplicationConfiguration_withoutStorageClass(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "prefix": "foo", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -653,7 +651,7 @@ func TestAccS3BucketReplicationConfiguration_schemaV2(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -668,10 +666,10 @@ func TestAccS3BucketReplicationConfiguration_schemaV2(t *testing.T) { "filter.#": "1", "filter.0.prefix": "foo", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, - "status": s3.ReplicationRuleStatusEnabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusEnabled), + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), @@ -695,7 +693,7 @@ func TestAccS3BucketReplicationConfiguration_schemaV2SameRegion(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketReplicationConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -710,10 +708,10 @@ func TestAccS3BucketReplicationConfiguration_schemaV2SameRegion(t *testing.T) { "filter.#": "1", "filter.0.prefix": "testprefix", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, - "status": s3.ReplicationRuleStatusEnabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusEnabled), + "status": 
string(types.ReplicationRuleStatusEnabled), "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), @@ -740,18 +738,18 @@ func TestAccS3BucketReplicationConfiguration_schemaV2DestinationMetrics(t *testi acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ { - Config: testAccBucketReplicationConfigurationConfig_schemaV2DestinationMetricsStatusOnly(rName, s3.StorageClassStandard), + Config: testAccBucketReplicationConfigurationConfig_schemaV2DestinationMetricsStatusOnly(rName, string(types.StorageClassStandard)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketReplicationConfigurationExists(ctx, resourceName), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "destination.#": "1", "destination.0.metrics.#": "1", - "destination.0.metrics.0.status": s3.MetricsStatusEnabled, + "destination.0.metrics.0.status": string(types.MetricsStatusEnabled), "destination.0.metrics.0.event_threshold.#": "0", }), ), @@ -780,7 +778,7 @@ func TestAccS3BucketReplicationConfiguration_existingObjectReplication(t *testin resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -795,12 +793,12 @@ func TestAccS3BucketReplicationConfiguration_existingObjectReplication(t *testin "filter.#": "1", "filter.0.prefix": "testprefix", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusEnabled), "existing_object_replication.#": "1", - "existing_object_replication.0.status": s3.ExistingObjectReplicationStatusEnabled, - "status": s3.ReplicationRuleStatusEnabled, + "existing_object_replication.0.status": string(types.ExistingObjectReplicationStatusEnabled), + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", - "destination.0.storage_class": s3.StorageClassStandard, + "destination.0.storage_class": string(types.StorageClassStandard), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), @@ -827,7 +825,7 @@ func TestAccS3BucketReplicationConfiguration_filter_emptyConfigurationBlock(t *t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: 
[]resource.TestStep{ @@ -840,9 +838,9 @@ func TestAccS3BucketReplicationConfiguration_filter_emptyConfigurationBlock(t *t resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusDisabled), "filter.#": "1", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -870,7 +868,7 @@ func TestAccS3BucketReplicationConfiguration_filter_emptyPrefix(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -883,10 +881,10 @@ func TestAccS3BucketReplicationConfiguration_filter_emptyPrefix(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusDisabled), "filter.#": "1", "filter.0.prefix": "", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -917,7 +915,7 @@ func TestAccS3BucketReplicationConfiguration_filter_tagFilter(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -930,12 +928,12 @@ func TestAccS3BucketReplicationConfiguration_filter_tagFilter(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusDisabled), "filter.#": "1", "filter.0.tag.#": "1", "filter.0.tag.0.key": "testkey", "filter.0.tag.0.value": "testvalue", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -962,7 +960,7 @@ func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), 
ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -975,14 +973,14 @@ func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusDisabled), "filter.#": "1", "filter.0.and.#": "1", "filter.0.and.0.prefix": "foo", "filter.0.and.0.tags.%": "2", "filter.0.and.0.tags.testkey1": "testvalue1", "filter.0.and.0.tags.testkey2": "testvalue2", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -1002,13 +1000,13 @@ func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "foobar", "delete_marker_replication.#": "1", - "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "delete_marker_replication.0.status": string(types.DeleteMarkerReplicationStatusDisabled), "filter.#": "1", "filter.0.and.#": "1", "filter.0.and.0.tags.%": "2", "filter.0.and.0.tags.testkey1": "testvalue1", "filter.0.and.0.tags.testkey2": "testvalue2", - "status": s3.ReplicationRuleStatusEnabled, + "status": string(types.ReplicationRuleStatusEnabled), "destination.#": "1", }), resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), @@ -1038,7 +1036,7 @@ func TestAccS3BucketReplicationConfiguration_filter_withoutId(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1050,7 +1048,7 @@ func TestAccS3BucketReplicationConfiguration_filter_withoutId(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "rule.0.id"), resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.status", s3.ReplicationRuleStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", string(types.ReplicationRuleStatusEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.0.delete_marker_replication.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.destination.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "rule.0.destination.0.bucket", dstBucketResourceName, "arn"), @@ -1076,7 +1074,7 @@ func TestAccS3BucketReplicationConfiguration_withoutPrefix(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: 
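The recurring substitution in these test hunks is mechanical but worth making concrete: AWS SDK for Go v1 exposed S3 enum values as untyped string constants, while v2 models them as typed string enums in the `s3/types` package, so anywhere a raw string is expected — such as these attribute-check maps — an explicit conversion is required. A minimal standalone sketch of the difference, assuming only the `aws-sdk-go-v2` `s3/types` package:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// v1 (removed): s3.ReplicationRuleStatusEnabled was already a plain string.
	// v2: the constant is typed as types.ReplicationRuleStatus, so an
	// explicit string(...) conversion is needed in string-keyed maps.
	attrs := map[string]string{
		"status": string(types.ReplicationRuleStatusEnabled), // "Enabled"
	}
	fmt.Println(attrs["status"])
}
```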
acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1107,7 +1105,7 @@ func TestAccS3BucketReplicationConfiguration_migrate_noChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1150,7 +1148,7 @@ func TestAccS3BucketReplicationConfiguration_migrate_withChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1183,19 +1181,16 @@ func TestAccS3BucketReplicationConfiguration_migrate_withChange(t *testing.T) { // version, but for use with "same region" tests requiring only one provider. func testAccCheckBucketReplicationConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_replication_configuration" { continue } - input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.GetBucketReplicationWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfs3.FindReplicationConfiguration(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + if tfresource.NotFound(err) { continue } @@ -1203,9 +1198,7 @@ func testAccCheckBucketReplicationConfigurationDestroy(ctx context.Context) reso return err } - if replication, ok := output.(*s3.GetBucketReplicationOutput); ok && replication != nil && replication.ReplicationConfiguration != nil { - return fmt.Errorf("S3 Replication Configuration for bucket (%s) still exists", rs.Primary.ID) - } + return fmt.Errorf("S3 Bucket Replication Configuration %s still exists", rs.Primary.ID) } return nil @@ -1214,19 +1207,16 @@ func testAccCheckBucketReplicationConfigurationDestroy(ctx context.Context) reso func testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx context.Context) acctest.TestCheckWithProviderFunc { return func(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_replication_configuration" { continue } - input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return 
conn.GetBucketReplicationWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfs3.FindReplicationConfiguration(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + if tfresource.NotFound(err) { continue } @@ -1234,9 +1224,7 @@ func testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx context.C return err } - if replication, ok := output.(*s3.GetBucketReplicationOutput); ok && replication != nil && replication.ReplicationConfiguration != nil { - return fmt.Errorf("S3 Replication Configuration for bucket (%s) still exists", rs.Primary.ID) - } + return fmt.Errorf("S3 Bucket Replication Configuration %s still exists", rs.Primary.ID) } return nil @@ -1250,19 +1238,15 @@ func testAccCheckBucketReplicationConfigurationExists(ctx context.Context, n str return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - _, err := tfs3.FindBucketReplicationConfigurationByID(ctx, conn, rs.Primary.ID) + _, err := tfs3.FindReplicationConfiguration(ctx, conn, rs.Primary.ID) return err } } -func testAccBucketReplicationConfigurationBase(rName string) string { +func testAccBucketReplicationConfigurationConfig_base(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -1312,7 +1296,7 @@ resource "aws_s3_bucket_versioning" "source" { } func testAccBucketReplicationConfigurationConfig_basic(rName, storageClass string) string { - return testAccBucketReplicationConfigurationBase(rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1332,12 +1316,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { storage_class = %[1]q } } -}`, storageClass) +}`, storageClass)) } func testAccBucketReplicationConfigurationConfig_prefixNoID(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1359,8 +1342,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_filterNoID(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1387,9 +1369,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_rtc(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1428,7 +1408,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_replicaMods(rName string) string { - return 
testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1457,13 +1437,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { bucket = aws_s3_bucket.destination.arn } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_multipleDestinationsEmptyFilter(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { provider = "awsalternate" bucket = "%[1]s-destination2" @@ -1554,9 +1532,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_multipleDestinationsNonEmptyFilter(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { provider = "awsalternate" bucket = "%[1]s-destination2" @@ -1660,9 +1636,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_multipleDestinationsTwoDestination(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { provider = "awsalternate" bucket = "%[1]s-destination2" @@ -1726,7 +1700,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_sseKMSEncryptedObjects(rName string) string { - return testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_kms_key" "test" { provider = "awsalternate" description = "TF Acceptance Test S3 repl KMS key" @@ -1763,11 +1737,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { } } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_accessControlTranslation(rName string) string { - return testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "test" { @@ -1794,11 +1768,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { } } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_rulesDestination(rName string) string { - return testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "test" { @@ -1821,11 +1795,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { storage_class = "STANDARD" } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_sseKMSEncryptedObjectsAndAccessControlTranslation(rName string) string { - return testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` data "aws_caller_identity" "current" {} 
resource "aws_kms_key" "test" { @@ -1867,11 +1841,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { } } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_noStorageClass(rName string) string { - return testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1890,11 +1864,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { bucket = aws_s3_bucket.destination.arn } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_v2NoTags(rName string) string { - return testAccBucketReplicationConfigurationBase(rName) + ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -1921,7 +1895,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { storage_class = "STANDARD" } } -}` +}`) } func testAccBucketReplicationConfigurationConfig_schemaV2SameRegion(rName, rNameDestination string) string { @@ -2078,9 +2052,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_filterEmptyBlock(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [aws_s3_bucket_versioning.source] @@ -2106,8 +2078,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_filterEmptyPrefix(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [aws_s3_bucket_versioning.source] @@ -2131,14 +2102,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { bucket = aws_s3_bucket.destination.arn } } -}`, - ) +}`) } func testAccBucketReplicationConfigurationConfig_filterTag(rName, key, value string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2172,9 +2140,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_filterAndOperatorTags(rName, key1, value1, key2, value2 string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2210,9 +2176,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_filterAndOperatorPrefixAndTags(rName, key1, value1, key2, value2 string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2249,7 +2213,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationConfig_schemaV2DestinationMetricsStatusOnly(rName, storageClass string) string { - return testAccBucketReplicationConfigurationBase(rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2279,13 +2243,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { } } } -}`, storageClass) +}`, storageClass)) } func testAccBucketReplicationConfigurationConfig_noPrefix(rName string) string { - return acctest.ConfigCompose( - testAccBucketReplicationConfigurationBase(rName), - ` + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2307,7 +2269,7 @@ resource "aws_s3_bucket_replication_configuration" "test" { }`) } -func testAccBucketReplicationConfigurationMigrationBase(rName string) string { +func testAccBucketReplicationConfigurationConfig_migrationBase(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -2358,9 +2320,7 @@ resource "aws_s3_bucket_versioning" "destination" { } func testAccBucketReplicationConfigurationConfig_migrateNoChange(rName string) string { - return acctest.ConfigCompose( - acctest.ConfigMultipleRegionProvider(2), - testAccBucketReplicationConfigurationMigrationBase(rName), ` + return acctest.ConfigCompose(acctest.ConfigMultipleRegionProvider(2), testAccBucketReplicationConfigurationConfig_migrationBase(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2396,14 +2356,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { } } } -}`, - ) +}`) } func testAccBucketReplicationConfigurationConfig_migrateChange(rName string) string { - return acctest.ConfigCompose( - acctest.ConfigMultipleRegionProvider(2), - testAccBucketReplicationConfigurationMigrationBase(rName), ` + return acctest.ConfigCompose(acctest.ConfigMultipleRegionProvider(2), testAccBucketReplicationConfigurationConfig_migrationBase(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { depends_on = [ aws_s3_bucket_versioning.source, @@ -2432,6 +2389,5 @@ resource "aws_s3_bucket_replication_configuration" "test" { prefix = "bar" } } -}`, - ) +}`) } diff --git a/internal/service/s3/enum.go b/internal/service/s3/enum.go index 0abc9157708..72a62fefe41 100644 --- a/internal/service/s3/enum.go +++ b/internal/service/s3/enum.go @@ -4,8 +4,3 @@ package s3 const DefaultKMSKeyAlias = "alias/aws/s3" - -const ( - LifecycleRuleStatusEnabled = "Enabled" - LifecycleRuleStatusDisabled = "Disabled" -) diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index 758f1839cbd..ba0798bd35f 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -17,7 +17,7 @@ const ( errCodeNoSuchBucketPolicy = "NoSuchBucketPolicy" errCodeNoSuchConfiguration = "NoSuchConfiguration" errCodeNoSuchCORSConfiguration = "NoSuchCORSConfiguration" - ErrCodeNoSuchLifecycleConfiguration = "NoSuchLifecycleConfiguration" + 
errCodeNoSuchLifecycleConfiguration = "NoSuchLifecycleConfiguration" errCodeNoSuchKey = "NoSuchKey" ErrCodeNoSuchPublicAccessBlockConfiguration = "NoSuchPublicAccessBlockConfiguration" errCodeNoSuchTagSet = "NoSuchTagSet" @@ -30,7 +30,7 @@ const ( errCodeObjectLockConfigurationNotFoundError = "ObjectLockConfigurationNotFoundError" errCodeOperationAborted = "OperationAborted" errCodeOwnershipControlsNotFoundError = "OwnershipControlsNotFoundError" - ErrCodeReplicationConfigurationNotFound = "ReplicationConfigurationNotFoundError" + errCodeReplicationConfigurationNotFound = "ReplicationConfigurationNotFoundError" errCodeServerSideEncryptionConfigurationNotFound = "ServerSideEncryptionConfigurationNotFoundError" errCodeUnsupportedArgument = "UnsupportedArgument" // errCodeXNotImplemented is returned from Third Party S3 implementations diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 77bc655eb55..2019e6c7a60 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -19,13 +19,17 @@ var ( FindCORSRules = findCORSRules FindIntelligentTieringConfiguration = findIntelligentTieringConfiguration FindInventoryConfiguration = findInventoryConfiguration + FindLifecycleRules = findLifecycleRules FindLoggingEnabled = findLoggingEnabled FindMetricsConfiguration = findMetricsConfiguration FindObjectByBucketAndKey = findObjectByBucketAndKey FindObjectLockConfiguration = findObjectLockConfiguration FindOwnershipControls = findOwnershipControls + FindReplicationConfiguration = findReplicationConfiguration FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration + LifecycleRuleStatusDisabled = lifecycleRuleStatusDisabled + LifecycleRuleStatusEnabled = lifecycleRuleStatusEnabled ) diff --git a/internal/service/s3/flex.go b/internal/service/s3/flex.go deleted file mode 100644 index 1ce441db303..00000000000 --- a/internal/service/s3/flex.go +++ /dev/null @@ -1,1266 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package s3 - -import ( - "context" - "fmt" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" -) - -func ExpandReplicationRuleDestinationAccessControlTranslation(l []interface{}) *s3.AccessControlTranslation { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.AccessControlTranslation{} - - if v, ok := tfMap["owner"].(string); ok && v != "" { - result.Owner = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleDestinationEncryptionConfiguration(l []interface{}) *s3.EncryptionConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.EncryptionConfiguration{} - - if v, ok := tfMap["replica_kms_key_id"].(string); ok && v != "" { - result.ReplicaKmsKeyID = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleDeleteMarkerReplication(l []interface{}) *s3.DeleteMarkerReplication { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.DeleteMarkerReplication{} - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleDestination(l []interface{}) *s3.Destination { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.Destination{} - - if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.AccessControlTranslation = ExpandReplicationRuleDestinationAccessControlTranslation(v) - } - - if v, ok := tfMap["account"].(string); ok && v != "" { - result.Account = aws.String(v) - } - - if v, ok := tfMap["bucket"].(string); ok && v != "" { - result.Bucket = aws.String(v) - } - - if v, ok := tfMap["encryption_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.EncryptionConfiguration = ExpandReplicationRuleDestinationEncryptionConfiguration(v) - } - - if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Metrics = ExpandReplicationRuleDestinationMetrics(v) - } - - if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.ReplicationTime = ExpandReplicationRuleDestinationReplicationTime(v) - } - - if v, ok := tfMap["storage_class"].(string); ok && v != "" { - result.StorageClass = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleExistingObjectReplication(l []interface{}) *s3.ExistingObjectReplication { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.ExistingObjectReplication{} - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleFilter(ctx context.Context, l []interface{}) *s3.ReplicationRuleFilter { - if len(l) == 0 { - return nil - } - - result := &s3.ReplicationRuleFilter{} - - // Support the empty filter block in terraform i.e. 
'filter {}', - // which is also supported by the API even though the docs note that - // one of Prefix, Tag, or And is required. - if l[0] == nil { - return result - } - - tfMap := l[0].(map[string]interface{}) - - if v, ok := tfMap["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.And = ExpandReplicationRuleFilterAndOperator(ctx, v) - } - - if v, ok := tfMap["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Tag = ExpandReplicationRuleFilterTag(v) - } - - // Per AWS S3 API, "A Filter must have exactly one of Prefix, Tag, or And specified"; - // Specifying more than one of the listed parameters results in a MalformedXML error. - // If a filter is specified as filter { prefix = "" } in Terraform, we should send the prefix value - // in the API request even if it is an empty value, else Terraform will report non-empty plans. - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23487 - if v, ok := tfMap["prefix"].(string); ok && result.And == nil && result.Tag == nil { - result.Prefix = aws.String(v) - } - - return result -} - -func ExpandLifecycleRuleAbortIncompleteMultipartUpload(m map[string]interface{}) *s3.AbortIncompleteMultipartUpload { - if len(m) == 0 { - return nil - } - - result := &s3.AbortIncompleteMultipartUpload{} - - if v, ok := m["days_after_initiation"].(int); ok { - result.DaysAfterInitiation = aws.Int64(int64(v)) - } - - return result -} - -func ExpandLifecycleRuleExpiration(l []interface{}) (*s3.LifecycleExpiration, error) { - if len(l) == 0 { - return nil, nil - } - - result := &s3.LifecycleExpiration{} - - if l[0] == nil { - return result, nil - } - - m := l[0].(map[string]interface{}) - - if v, ok := m["date"].(string); ok && v != "" { - t, err := time.Parse(time.RFC3339, v) - if err != nil { - return nil, fmt.Errorf("parsing S3 Bucket Lifecycle Rule Expiration date: %w", err) - } - result.Date = aws.Time(t) - } - - if v, ok := m["days"].(int); ok && v > 0 { - result.Days = aws.Int64(int64(v)) - } - - // This cannot be specified with Days or Date - if v, ok := m["expired_object_delete_marker"].(bool); ok && result.Date == nil && result.Days == nil { - result.ExpiredObjectDeleteMarker = aws.Bool(v) - } - - return result, nil -} - -// ExpandLifecycleRuleFilter ensures a Filter can have only 1 of prefix, tag, or and -func ExpandLifecycleRuleFilter(ctx context.Context, l []interface{}) *s3.LifecycleRuleFilter { - if len(l) == 0 { - return nil - } - - result := &s3.LifecycleRuleFilter{} - - if l[0] == nil { - return result - } - - m := l[0].(map[string]interface{}) - - if v, ok := m["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.And = ExpandLifecycleRuleFilterAndOperator(ctx, v[0].(map[string]interface{})) - } - - if v, null, _ := nullable.Int(m["object_size_greater_than"].(string)).Value(); !null && v >= 0 { - result.ObjectSizeGreaterThan = aws.Int64(v) - } - - if v, null, _ := nullable.Int(m["object_size_less_than"].(string)).Value(); !null && v > 0 { - result.ObjectSizeLessThan = aws.Int64(v) - } - - if v, ok := m["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Tag = ExpandLifecycleRuleFilterTag(v[0].(map[string]interface{})) - } - - // Per AWS S3 API, "A Filter must have exactly one of Prefix, Tag, or And specified"; - // Specifying more than one of the listed parameters results in a MalformedXML error. - // In practice, this also includes ObjectSizeGreaterThan and ObjectSizeLessThan. 
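The guard that follows is easy to misread, so here is a minimal standalone sketch of the two valid filter shapes, written against the same SDK v1 types this deleted file used; the `logs/`, `env`, and `prod` values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// filter { prefix = "" } still sends a present-but-empty Prefix, so the
	// API response matches Terraform state and plans stay clean.
	onlyPrefix := &s3.LifecycleRuleFilter{Prefix: aws.String("")}

	// Prefix combined with tags is only valid via the And operator; setting
	// Prefix alongside Tag directly would be rejected as MalformedXML.
	combined := &s3.LifecycleRuleFilter{
		And: &s3.LifecycleRuleAndOperator{
			Prefix: aws.String("logs/"),
			Tags:   []*s3.Tag{{Key: aws.String("env"), Value: aws.String("prod")}},
		},
	}

	fmt.Println(onlyPrefix, combined)
}
```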
- if v, ok := m["prefix"].(string); ok && result.And == nil && result.Tag == nil && result.ObjectSizeGreaterThan == nil && result.ObjectSizeLessThan == nil { - result.Prefix = aws.String(v) - } - - return result -} - -func ExpandLifecycleRuleFilterAndOperator(ctx context.Context, m map[string]interface{}) *s3.LifecycleRuleAndOperator { - if len(m) == 0 { - return nil - } - - result := &s3.LifecycleRuleAndOperator{} - - if v, ok := m["object_size_greater_than"].(int); ok && v > 0 { - result.ObjectSizeGreaterThan = aws.Int64(int64(v)) - } - - if v, ok := m["object_size_less_than"].(int); ok && v > 0 { - result.ObjectSizeLessThan = aws.Int64(int64(v)) - } - - if v, ok := m["prefix"].(string); ok { - result.Prefix = aws.String(v) - } - - if v, ok := m["tags"].(map[string]interface{}); ok && len(v) > 0 { - tags := Tags(tftags.New(ctx, v).IgnoreAWS()) - if len(tags) > 0 { - result.Tags = tags - } - } - - return result -} - -func ExpandLifecycleRuleFilterTag(m map[string]interface{}) *s3.Tag { - if len(m) == 0 { - return nil - } - - result := &s3.Tag{} - - if key, ok := m["key"].(string); ok { - result.Key = aws.String(key) - } - - if value, ok := m["value"].(string); ok { - result.Value = aws.String(value) - } - - return result -} - -func ExpandLifecycleRuleNoncurrentVersionExpiration(m map[string]interface{}) *s3.NoncurrentVersionExpiration { - if len(m) == 0 { - return nil - } - - result := &s3.NoncurrentVersionExpiration{} - - if v, null, _ := nullable.Int(m["newer_noncurrent_versions"].(string)).Value(); !null && v > 0 { - result.NewerNoncurrentVersions = aws.Int64(v) - } - - if v, ok := m["noncurrent_days"].(int); ok { - result.NoncurrentDays = aws.Int64(int64(v)) - } - - return result -} - -func ExpandLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []*s3.NoncurrentVersionTransition { - if len(l) == 0 || l[0] == nil { - return nil - } - - var results []*s3.NoncurrentVersionTransition - - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - - if !ok { - continue - } - - transition := &s3.NoncurrentVersionTransition{} - - if v, null, _ := nullable.Int(tfMap["newer_noncurrent_versions"].(string)).Value(); !null && v > 0 { - transition.NewerNoncurrentVersions = aws.Int64(v) - } - - if v, ok := tfMap["noncurrent_days"].(int); ok { - transition.NoncurrentDays = aws.Int64(int64(v)) - } - - if v, ok := tfMap["storage_class"].(string); ok && v != "" { - transition.StorageClass = aws.String(v) - } - - results = append(results, transition) - } - - return results -} - -func ExpandLifecycleRuleTransitions(l []interface{}) ([]*s3.Transition, error) { - if len(l) == 0 || l[0] == nil { - return nil, nil - } - - var results []*s3.Transition - - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - - if !ok { - continue - } - - transition := &s3.Transition{} - - if v, ok := tfMap["date"].(string); ok && v != "" { - t, err := time.Parse(time.RFC3339, v) - if err != nil { - return nil, fmt.Errorf("parsing S3 Bucket Lifecycle Rule Transition date: %w", err) - } - transition.Date = aws.Time(t) - } - - // Only one of "date" and "days" can be configured - // so only set the transition.Days value when transition.Date is nil - // By default, tfMap["days"] = 0 if not explicitly configured in terraform. 
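Since the Date/Days exclusivity above is a common stumbling block, a short sketch (same v1 types, values illustrative): a transition carries either an absolute RFC 3339 date or a relative day count, never both, and `days = 0` stays expressible because Days is only populated when no Date was parsed.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	date, _ := time.Parse(time.RFC3339, "2024-01-01T00:00:00Z")

	// Transition on a fixed date...
	byDate := &s3.Transition{
		Date:         aws.Time(date),
		StorageClass: aws.String(s3.TransitionStorageClassGlacier),
	}

	// ...or a number of days after creation (0 = transition immediately),
	// but never both in the same transition.
	byDays := &s3.Transition{
		Days:         aws.Int64(0),
		StorageClass: aws.String(s3.TransitionStorageClassGlacier),
	}

	fmt.Println(byDate, byDays)
}
```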
- if v, ok := tfMap["days"].(int); ok && v >= 0 && transition.Date == nil { - transition.Days = aws.Int64(int64(v)) - } - - if v, ok := tfMap["storage_class"].(string); ok && v != "" { - transition.StorageClass = aws.String(v) - } - - results = append(results, transition) - } - - return results, nil -} - -func ExpandLifecycleRules(ctx context.Context, l []interface{}) ([]*s3.LifecycleRule, error) { - if len(l) == 0 || l[0] == nil { - return nil, nil - } - - var results []*s3.LifecycleRule - - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - - if !ok { - continue - } - - result := &s3.LifecycleRule{} - - if v, ok := tfMap["abort_incomplete_multipart_upload"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.AbortIncompleteMultipartUpload = ExpandLifecycleRuleAbortIncompleteMultipartUpload(v[0].(map[string]interface{})) - } - - if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 { - expiration, err := ExpandLifecycleRuleExpiration(v) - if err != nil { - return nil, err - } - result.Expiration = expiration - } - - if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { - result.Filter = ExpandLifecycleRuleFilter(ctx, v) - } - - if v, ok := tfMap["prefix"].(string); ok && result.Filter == nil { - // If neither the filter block nor the prefix are specified, - // apply the Default behavior from v3.x of the provider; - // otherwise, set the prefix as specified in Terraform. - if v == "" { - result.SetFilter(&s3.LifecycleRuleFilter{ - Prefix: aws.String(v), - }) - } else { - result.Prefix = aws.String(v) - } - } - - if v, ok := tfMap["id"].(string); ok { - result.ID = aws.String(v) - } - - if v, ok := tfMap["noncurrent_version_expiration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.NoncurrentVersionExpiration = ExpandLifecycleRuleNoncurrentVersionExpiration(v[0].(map[string]interface{})) - } - - if v, ok := tfMap["noncurrent_version_transition"].(*schema.Set); ok && v.Len() > 0 { - result.NoncurrentVersionTransitions = ExpandLifecycleRuleNoncurrentVersionTransitions(v.List()) - } - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - if v, ok := tfMap["transition"].(*schema.Set); ok && v.Len() > 0 { - transitions, err := ExpandLifecycleRuleTransitions(v.List()) - if err != nil { - return nil, err - } - result.Transitions = transitions - } - - results = append(results, result) - } - - return results, nil -} - -func ExpandReplicationRuleDestinationMetrics(l []interface{}) *s3.Metrics { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.Metrics{} - - if v, ok := tfMap["event_threshold"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.EventThreshold = ExpandReplicationRuleDestinationReplicationTimeValue(v) - } - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleFilterAndOperator(ctx context.Context, l []interface{}) *s3.ReplicationRuleAndOperator { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.ReplicationRuleAndOperator{} - - if v, ok := tfMap["prefix"].(string); ok && v != "" { - result.Prefix = aws.String(v) - } - - if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { - tags := Tags(tftags.New(ctx, v).IgnoreAWS()) - if len(tags) > 0 { - result.Tags = tags - } - } - - 
return result -} - -func ExpandReplicationRuleDestinationReplicationTime(l []interface{}) *s3.ReplicationTime { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.ReplicationTime{} - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - if v, ok := tfMap["time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Time = ExpandReplicationRuleDestinationReplicationTimeValue(v) - } - - return result -} - -func ExpandReplicationRuleDestinationReplicationTimeValue(l []interface{}) *s3.ReplicationTimeValue { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.ReplicationTimeValue{} - - if v, ok := tfMap["minutes"].(int); ok { - result.Minutes = aws.Int64(int64(v)) - } - - return result -} - -func ExpandSourceSelectionCriteriaReplicaModifications(l []interface{}) *s3.ReplicaModifications { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.ReplicaModifications{} - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - return result -} - -func ExpandReplicationRules(ctx context.Context, l []interface{}) []*s3.ReplicationRule { - var rules []*s3.ReplicationRule - - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { - continue - } - rule := &s3.ReplicationRule{} - - if v, ok := tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.DeleteMarkerReplication = ExpandReplicationRuleDeleteMarkerReplication(v) - } - - if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Destination = ExpandReplicationRuleDestination(v) - } - - if v, ok := tfMap["existing_object_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.ExistingObjectReplication = ExpandReplicationRuleExistingObjectReplication(v) - } - - if v, ok := tfMap["id"].(string); ok && v != "" { - rule.ID = aws.String(v) - } - - if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.SourceSelectionCriteria = ExpandReplicationRuleSourceSelectionCriteria(v) - } - - if v, ok := tfMap["status"].(string); ok && v != "" { - rule.Status = aws.String(v) - } - - // Support the empty filter block in terraform i.e. 'filter {}', - // which implies the replication rule does not require a specific filter, - // by expanding the "filter" array even if the first element is nil. 
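The branch below is the crux of replication's dual XML schemas; a condensed sketch of the two rule shapes it produces (v1 types, values illustrative, Destination omitted for brevity):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// XML schema V2: any filter block, even 'filter {}', selects the newer
	// schema, where Priority orders overlapping rules and delete marker
	// replication is configured explicitly.
	v2 := &s3.ReplicationRule{
		Filter:   &s3.ReplicationRuleFilter{},
		Priority: aws.Int64(1),
		DeleteMarkerReplication: &s3.DeleteMarkerReplication{
			Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
		},
		Status: aws.String(s3.ReplicationRuleStatusEnabled),
	}

	// XML schema V1: no filter block falls back to the legacy schema,
	// keyed on a bare Prefix.
	v1 := &s3.ReplicationRule{
		Prefix: aws.String("foo"),
		Status: aws.String(s3.ReplicationRuleStatusEnabled),
	}

	fmt.Println(v2, v1)
}
```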
- if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { - // XML schema V2 - rule.Filter = ExpandReplicationRuleFilter(ctx, v) - rule.Priority = aws.Int64(int64(tfMap["priority"].(int))) - } else { - // XML schema V1 - rule.Prefix = aws.String(tfMap["prefix"].(string)) - } - - rules = append(rules, rule) - } - - return rules -} - -func ExpandReplicationRuleSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.SourceSelectionCriteria{} - - if v, ok := tfMap["replica_modifications"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.ReplicaModifications = ExpandSourceSelectionCriteriaReplicaModifications(v) - } - - if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.SseKmsEncryptedObjects = ExpandSourceSelectionCriteriaSSEKMSEncryptedObjects(v) - } - - return result -} - -func ExpandSourceSelectionCriteriaSSEKMSEncryptedObjects(l []interface{}) *s3.SseKmsEncryptedObjects { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.SseKmsEncryptedObjects{} - - if v, ok := tfMap["status"].(string); ok && v != "" { - result.Status = aws.String(v) - } - - return result -} - -func ExpandReplicationRuleFilterTag(l []interface{}) *s3.Tag { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - - if !ok { - return nil - } - - result := &s3.Tag{} - - if v, ok := tfMap["key"].(string); ok && v != "" { - result.Key = aws.String(v) - } - - if v, ok := tfMap["value"].(string); ok && v != "" { - result.Value = aws.String(v) - } - - return result -} - -func FlattenReplicationRuleDestinationAccessControlTranslation(act *s3.AccessControlTranslation) []interface{} { - if act == nil { - return []interface{}{} - } - - m := make(map[string]interface{}) - - if act.Owner != nil { - m["owner"] = aws.StringValue(act.Owner) - } - - return []interface{}{m} -} - -func FlattenReplicationRuleDestinationEncryptionConfiguration(ec *s3.EncryptionConfiguration) []interface{} { - if ec == nil { - return []interface{}{} - } - - m := make(map[string]interface{}) - - if ec.ReplicaKmsKeyID != nil { - m["replica_kms_key_id"] = aws.StringValue(ec.ReplicaKmsKeyID) - } - - return []interface{}{m} -} - -func FlattenReplicationRuleDeleteMarkerReplication(dmr *s3.DeleteMarkerReplication) []interface{} { - if dmr == nil { - return []interface{}{} - } - - m := make(map[string]interface{}) - - if dmr.Status != nil { - m["status"] = aws.StringValue(dmr.Status) - } - - return []interface{}{m} -} - -func FlattenReplicationRuleDestination(dest *s3.Destination) []interface{} { - if dest == nil { - return []interface{}{} - } - - m := make(map[string]interface{}) - - if dest.AccessControlTranslation != nil { - m["access_control_translation"] = FlattenReplicationRuleDestinationAccessControlTranslation(dest.AccessControlTranslation) - } - - if dest.Account != nil { - m["account"] = aws.StringValue(dest.Account) - } - - if dest.Bucket != nil { - m["bucket"] = aws.StringValue(dest.Bucket) - } - - if dest.EncryptionConfiguration != nil { - m["encryption_configuration"] = FlattenReplicationRuleDestinationEncryptionConfiguration(dest.EncryptionConfiguration) - } - - if dest.Metrics != nil { - m["metrics"] = FlattenReplicationRuleDestinationMetrics(dest.Metrics) - } - - if 
dest.ReplicationTime != nil {
-		m["replication_time"] = FlattenReplicationRuleDestinationReplicationTime(dest.ReplicationTime)
-	}
-
-	if dest.StorageClass != nil {
-		m["storage_class"] = aws.StringValue(dest.StorageClass)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleExistingObjectReplication(eor *s3.ExistingObjectReplication) []interface{} {
-	if eor == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if eor.Status != nil {
-		m["status"] = aws.StringValue(eor.Status)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleFilter(ctx context.Context, filter *s3.ReplicationRuleFilter) []interface{} {
-	if filter == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if filter.And != nil {
-		m["and"] = FlattenReplicationRuleFilterAndOperator(ctx, filter.And)
-	}
-
-	if filter.Prefix != nil {
-		m["prefix"] = aws.StringValue(filter.Prefix)
-	}
-
-	if filter.Tag != nil {
-		m["tag"] = FlattenReplicationRuleFilterTag(filter.Tag)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRules(ctx context.Context, rules []*s3.LifecycleRule) []interface{} {
-	if len(rules) == 0 {
-		return []interface{}{}
-	}
-
-	var results []interface{}
-
-	for _, rule := range rules {
-		if rule == nil {
-			continue
-		}
-
-		m := make(map[string]interface{})
-
-		if rule.AbortIncompleteMultipartUpload != nil {
-			m["abort_incomplete_multipart_upload"] = FlattenLifecycleRuleAbortIncompleteMultipartUpload(rule.AbortIncompleteMultipartUpload)
-		}
-
-		if rule.Expiration != nil {
-			m["expiration"] = FlattenLifecycleRuleExpiration(rule.Expiration)
-		}
-
-		if rule.Filter != nil {
-			m["filter"] = FlattenLifecycleRuleFilter(ctx, rule.Filter)
-		}
-
-		if rule.ID != nil {
-			m["id"] = aws.StringValue(rule.ID)
-		}
-
-		if rule.NoncurrentVersionExpiration != nil {
-			m["noncurrent_version_expiration"] = FlattenLifecycleRuleNoncurrentVersionExpiration(rule.NoncurrentVersionExpiration)
-		}
-
-		if rule.NoncurrentVersionTransitions != nil {
-			m["noncurrent_version_transition"] = FlattenLifecycleRuleNoncurrentVersionTransitions(rule.NoncurrentVersionTransitions)
-		}
-
-		if rule.Prefix != nil {
-			m["prefix"] = aws.StringValue(rule.Prefix)
-		}
-
-		if rule.Status != nil {
-			m["status"] = aws.StringValue(rule.Status)
-		}
-
-		if rule.Transitions != nil {
-			m["transition"] = FlattenLifecycleRuleTransitions(rule.Transitions)
-		}
-
-		results = append(results, m)
-	}
-
-	return results
-}
-
-func FlattenLifecycleRuleAbortIncompleteMultipartUpload(u *s3.AbortIncompleteMultipartUpload) []interface{} {
-	if u == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if u.DaysAfterInitiation != nil {
-		m["days_after_initiation"] = int(aws.Int64Value(u.DaysAfterInitiation))
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} {
-	if expiration == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if expiration.Days != nil {
-		m["days"] = int(aws.Int64Value(expiration.Days))
-	}
-
-	if expiration.Date != nil {
-		m["date"] = expiration.Date.Format(time.RFC3339)
-	}
-
-	if expiration.ExpiredObjectDeleteMarker != nil {
-		m["expired_object_delete_marker"] = aws.BoolValue(expiration.ExpiredObjectDeleteMarker)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRuleFilter(ctx context.Context, filter *s3.LifecycleRuleFilter) []interface{} {
-	if filter == nil {
-		return nil
-	}
-
-	m := make(map[string]interface{})
-
-	if filter.And != nil {
-		m["and"] = FlattenLifecycleRuleFilterAndOperator(ctx, filter.And)
-	}
-
-	if filter.ObjectSizeGreaterThan != nil {
-		m["object_size_greater_than"] = strconv.FormatInt(aws.Int64Value(filter.ObjectSizeGreaterThan), 10)
-	}
-
-	if filter.ObjectSizeLessThan != nil {
-		m["object_size_less_than"] = strconv.FormatInt(aws.Int64Value(filter.ObjectSizeLessThan), 10)
-	}
-
-	if filter.Prefix != nil {
-		m["prefix"] = aws.StringValue(filter.Prefix)
-	}
-
-	if filter.Tag != nil {
-		m["tag"] = FlattenLifecycleRuleFilterTag(filter.Tag)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRuleFilterAndOperator(ctx context.Context, andOp *s3.LifecycleRuleAndOperator) []interface{} {
-	if andOp == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if andOp.ObjectSizeGreaterThan != nil {
-		m["object_size_greater_than"] = int(aws.Int64Value(andOp.ObjectSizeGreaterThan))
-	}
-
-	if andOp.ObjectSizeLessThan != nil {
-		m["object_size_less_than"] = int(aws.Int64Value(andOp.ObjectSizeLessThan))
-	}
-
-	if andOp.Prefix != nil {
-		m["prefix"] = aws.StringValue(andOp.Prefix)
-	}
-
-	if andOp.Tags != nil {
-		m["tags"] = KeyValueTags(ctx, andOp.Tags).IgnoreAWS().Map()
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRuleFilterTag(tag *s3.Tag) []interface{} {
-	if tag == nil {
-		return nil
-	}
-
-	m := make(map[string]interface{})
-
-	if tag.Key != nil {
-		m["key"] = aws.StringValue(tag.Key)
-	}
-
-	if tag.Value != nil {
-		m["value"] = aws.StringValue(tag.Value)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRuleNoncurrentVersionExpiration(expiration *s3.NoncurrentVersionExpiration) []interface{} {
-	if expiration == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if expiration.NewerNoncurrentVersions != nil {
-		m["newer_noncurrent_versions"] = strconv.FormatInt(aws.Int64Value(expiration.NewerNoncurrentVersions), 10)
-	}
-
-	if expiration.NoncurrentDays != nil {
-		m["noncurrent_days"] = int(aws.Int64Value(expiration.NoncurrentDays))
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenLifecycleRuleNoncurrentVersionTransitions(transitions []*s3.NoncurrentVersionTransition) []interface{} {
-	if len(transitions) == 0 {
-		return []interface{}{}
-	}
-
-	var results []interface{}
-
-	for _, transition := range transitions {
-		if transition == nil {
-			continue
-		}
-
-		m := make(map[string]interface{})
-
-		if transition.NewerNoncurrentVersions != nil {
-			m["newer_noncurrent_versions"] = strconv.FormatInt(aws.Int64Value(transition.NewerNoncurrentVersions), 10)
-		}
-
-		if transition.NoncurrentDays != nil {
-			m["noncurrent_days"] = int(aws.Int64Value(transition.NoncurrentDays))
-		}
-
-		if transition.StorageClass != nil {
-			m["storage_class"] = aws.StringValue(transition.StorageClass)
-		}
-
-		results = append(results, m)
-	}
-
-	return results
-}
-
-func FlattenLifecycleRuleTransitions(transitions []*s3.Transition) []interface{} {
-	if len(transitions) == 0 {
-		return []interface{}{}
-	}
-
-	var results []interface{}
-
-	for _, transition := range transitions {
-		if transition == nil {
-			continue
-		}
-
-		m := make(map[string]interface{})
-
-		if transition.Date != nil {
-			m["date"] = transition.Date.Format(time.RFC3339)
-		}
-
-		if transition.Days != nil {
-			m["days"] = int(aws.Int64Value(transition.Days))
-		}
-
-		if transition.StorageClass != nil {
-			m["storage_class"] = aws.StringValue(transition.StorageClass)
-		}
-
-		results = append(results, m)
-	}
-
-	return results
-}
-
-func FlattenReplicationRuleDestinationMetrics(metrics *s3.Metrics) []interface{} {
-	if metrics == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if metrics.EventThreshold != nil {
-		m["event_threshold"] = FlattenReplicationRuleDestinationReplicationTimeValue(metrics.EventThreshold)
-	}
-
-	if metrics.Status != nil {
-		m["status"] = aws.StringValue(metrics.Status)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleDestinationReplicationTime(rt *s3.ReplicationTime) []interface{} {
-	if rt == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if rt.Status != nil {
-		m["status"] = aws.StringValue(rt.Status)
-	}
-
-	if rt.Time != nil {
-		m["time"] = FlattenReplicationRuleDestinationReplicationTimeValue(rt.Time)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleDestinationReplicationTimeValue(rtv *s3.ReplicationTimeValue) []interface{} {
-	if rtv == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if rtv.Minutes != nil {
-		m["minutes"] = int(aws.Int64Value(rtv.Minutes))
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRules(ctx context.Context, rules []*s3.ReplicationRule) []interface{} {
-	if len(rules) == 0 {
-		return []interface{}{}
-	}
-
-	var results []interface{}
-
-	for _, rule := range rules {
-		if rule == nil {
-			continue
-		}
-
-		m := make(map[string]interface{})
-
-		if rule.DeleteMarkerReplication != nil {
-			m["delete_marker_replication"] = FlattenReplicationRuleDeleteMarkerReplication(rule.DeleteMarkerReplication)
-		}
-
-		if rule.Destination != nil {
-			m["destination"] = FlattenReplicationRuleDestination(rule.Destination)
-		}
-
-		if rule.ExistingObjectReplication != nil {
-			m["existing_object_replication"] = FlattenReplicationRuleExistingObjectReplication(rule.ExistingObjectReplication)
-		}
-
-		if rule.Filter != nil {
-			m["filter"] = FlattenReplicationRuleFilter(ctx, rule.Filter)
-		}
-
-		if rule.ID != nil {
-			m["id"] = aws.StringValue(rule.ID)
-		}
-
-		if rule.Prefix != nil {
-			m["prefix"] = aws.StringValue(rule.Prefix)
-		}
-
-		if rule.Priority != nil {
-			m["priority"] = int(aws.Int64Value(rule.Priority))
-		}
-
-		if rule.SourceSelectionCriteria != nil {
-			m["source_selection_criteria"] = FlattenReplicationRuleSourceSelectionCriteria(rule.SourceSelectionCriteria)
-		}
-
-		if rule.Status != nil {
-			m["status"] = aws.StringValue(rule.Status)
-		}
-
-		results = append(results, m)
-	}
-
-	return results
-}
-
-func FlattenSourceSelectionCriteriaReplicaModifications(rc *s3.ReplicaModifications) []interface{} {
-	if rc == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if rc.Status != nil {
-		m["status"] = aws.StringValue(rc.Status)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleFilterAndOperator(ctx context.Context, op *s3.ReplicationRuleAndOperator) []interface{} {
-	if op == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if op.Prefix != nil {
-		m["prefix"] = aws.StringValue(op.Prefix)
-	}
-
-	if op.Tags != nil {
-		m["tags"] = KeyValueTags(ctx, op.Tags).IgnoreAWS().Map()
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleFilterTag(tag *s3.Tag) []interface{} {
-	if tag == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if tag.Key != nil {
-		m["key"] = aws.StringValue(tag.Key)
-	}
-
-	if tag.Value != nil {
-		m["value"] = aws.StringValue(tag.Value)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenReplicationRuleSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface{} {
-	if ssc == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if ssc.ReplicaModifications != nil {
-		m["replica_modifications"] = FlattenSourceSelectionCriteriaReplicaModifications(ssc.ReplicaModifications)
-	}
-
-	if ssc.SseKmsEncryptedObjects != nil {
-		m["sse_kms_encrypted_objects"] = FlattenSourceSelectionCriteriaSSEKMSEncryptedObjects(ssc.SseKmsEncryptedObjects)
-	}
-
-	return []interface{}{m}
-}
-
-func FlattenSourceSelectionCriteriaSSEKMSEncryptedObjects(objects *s3.SseKmsEncryptedObjects) []interface{} {
-	if objects == nil {
-		return []interface{}{}
-	}
-
-	m := make(map[string]interface{})
-
-	if objects.Status != nil {
-		m["status"] = aws.StringValue(objects.Status)
-	}
-
-	return []interface{}{m}
-}
diff --git a/internal/service/s3/flex_test.go b/internal/service/s3/flex_test.go
deleted file mode 100644
index 63309b58949..00000000000
--- a/internal/service/s3/flex_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package s3
-
-import (
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/s3"
-)
-
-func TestExpandReplicationRuleFilterTag(t *testing.T) {
-	t.Parallel()
-
-	expectedKey := "TestKey1"
-	expectedValue := "TestValue1"
-
-	tagMap := map[string]interface{}{
-		"key":   expectedKey,
-		"value": expectedValue,
-	}
-
-	result := ExpandReplicationRuleFilterTag([]interface{}{tagMap})
-
-	if result == nil {
-		t.Fatalf("Expected *s3.Tag to not be nil")
-	}
-
-	if actualKey := aws.StringValue(result.Key); actualKey != expectedKey {
-		t.Fatalf("Expected key %s, got %s", expectedKey, actualKey)
-	}
-
-	if actualValue := aws.StringValue(result.Value); actualValue != expectedValue {
-		t.Fatalf("Expected value %s, got %s", expectedValue, actualValue)
-	}
-}
-
-func TestFlattenReplicationRuleFilterTag(t *testing.T) {
-	t.Parallel()
-
-	expectedKey := "TestKey1"
-	expectedValue := "TestValue1"
-
-	tag := &s3.Tag{
-		Key:   aws.String(expectedKey),
-		Value: aws.String(expectedValue),
-	}
-
-	result := FlattenReplicationRuleFilterTag(tag)
-
-	if len(result) != 1 {
-		t.Fatalf("Expected array to have exactly 1 element, got %d", len(result))
-	}
-
-	tagMap, ok := result[0].(map[string]interface{})
-	if !ok {
-		t.Fatal("Expected element in array to be a map[string]interface{}")
-	}
-
-	actualKey, ok := tagMap["key"].(string)
-	if !ok {
-		t.Fatal("Expected string 'key' key in the map")
-	}
-
-	if actualKey != expectedKey {
-		t.Fatalf("Expected 'key' to equal %s, got %s", expectedKey, actualKey)
-	}
-
-	actualValue, ok := tagMap["value"].(string)
-	if !ok {
-		t.Fatal("Expected string 'value' key in the map")
-	}
-
-	if actualValue != expectedValue {
-		t.Fatalf("Expected 'value' to equal %s, got %s", expectedValue, actualValue)
-	}
-}
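The deleted helpers above all follow the same SDK v1 flatten pattern: every field on the API struct is a pointer, so each map entry is guarded by a nil check and unwrapped with `aws.StringValue`/`aws.Int64Value`/`aws.BoolValue`. Under AWS SDK for Go v2, which this change migrates the package to, enums become string-backed types and rule slices hold values rather than pointers, so the equivalent flattener shrinks considerably. A minimal sketch of the v2 shape, purely illustrative and not part of this diff (the `flattenTransitions` name is hypothetical; the provider's actual replacement may differ):

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// flattenTransitions mirrors the deleted FlattenLifecycleRuleTransitions:
// in v2 the rules arrive as a value slice ([]types.Transition) and
// StorageClass is a string-backed enum, so most nil checks disappear.
func flattenTransitions(transitions []types.Transition) []interface{} {
	var results []interface{}

	for _, transition := range transitions {
		m := map[string]interface{}{
			// Enum values stringify directly; no aws.StringValue needed.
			"storage_class": string(transition.StorageClass),
		}

		if transition.Date != nil {
			m["date"] = transition.Date.Format(time.RFC3339)
		}

		if transition.Days != nil {
			// v2 uses *int32 where v1 used *int64.
			m["days"] = int(aws.ToInt32(transition.Days))
		}

		results = append(results, m)
	}

	return results
}

func main() {
	fmt.Println(flattenTransitions([]types.Transition{
		{Days: aws.Int32(30), StorageClass: types.TransitionStorageClassGlacier},
	}))
}
```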
diff --git a/internal/service/s3/status.go b/internal/service/s3/status.go
deleted file mode 100644
index 0e6ffc477a5..00000000000
--- a/internal/service/s3/status.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package s3
-
-import (
-	"context"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
-)
-
-func lifecycleConfigurationRulesStatus(ctx context.Context, conn *s3.S3, bucket, expectedBucketOwner string, rules []*s3.LifecycleRule) retry.StateRefreshFunc {
-	return func() (interface{}, string, error) {
-		input := &s3.GetBucketLifecycleConfigurationInput{
-			Bucket: aws.String(bucket),
-		}
-
-		if expectedBucketOwner != "" {
-			input.ExpectedBucketOwner = aws.String(expectedBucketOwner)
-		}
-
-		output, err := conn.GetBucketLifecycleConfigurationWithContext(ctx, input)
-
-		if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration, s3.ErrCodeNoSuchBucket) {
-			return nil, "", nil
-		}
-
-		if err != nil {
-			return nil, "", err
-		}
-
-		if output == nil {
-			return nil, "", &retry.NotFoundError{
-				Message:     "Empty result",
-				LastRequest: input,
-			}
-		}
-
-		for _, expectedRule := range rules {
-			found := false
-
-			for _, actualRule := range output.Rules {
-				if aws.StringValue(actualRule.ID) != aws.StringValue(expectedRule.ID) {
-					continue
-				}
-				found = true
-				if aws.StringValue(actualRule.Status) != aws.StringValue(expectedRule.Status) {
-					return output, LifecycleConfigurationRulesStatusNotReady, nil
-				}
-			}
-
-			if !found {
-				return output, LifecycleConfigurationRulesStatusNotReady, nil
-			}
-		}
-
-		return output, LifecycleConfigurationRulesStatusReady, nil
-	}
-}
diff --git a/internal/service/s3/wait.go b/internal/service/s3/wait.go
deleted file mode 100644
index fa5525cb2e5..00000000000
--- a/internal/service/s3/wait.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package s3
-
-import (
-	"context"
-	"time"
-
-	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
-)
-
-const (
-	lifecycleConfigurationExtraRetryDelay         = 5 * time.Second
-	lifecycleConfigurationRulesPropagationTimeout = 3 * time.Minute
-	lifecycleConfigurationRulesSteadyTimeout      = 2 * time.Minute
-
-	// General timeout for S3 bucket changes to propagate.
-	// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel.
-	s3BucketPropagationTimeout = 2 * time.Minute // nosemgrep:ci.s3-in-const-name, ci.s3-in-var-name
-
-	// LifecycleConfigurationRulesStatusReady occurs when all configured rules reach their desired state (Enabled or Disabled)
-	LifecycleConfigurationRulesStatusReady = "READY"
-	// LifecycleConfigurationRulesStatusNotReady occurs when all configured rules have not reached their desired state (Enabled or Disabled)
-	LifecycleConfigurationRulesStatusNotReady = "NOT_READY"
-)
-
-func waitForLifecycleConfigurationRulesStatus(ctx context.Context, conn *s3.S3, bucket, expectedBucketOwner string, rules []*s3.LifecycleRule) error {
-	stateConf := &retry.StateChangeConf{
-		Pending:                   []string{"", LifecycleConfigurationRulesStatusNotReady},
-		Target:                    []string{LifecycleConfigurationRulesStatusReady},
-		Refresh:                   lifecycleConfigurationRulesStatus(ctx, conn, bucket, expectedBucketOwner, rules),
-		Timeout:                   lifecycleConfigurationRulesPropagationTimeout,
-		MinTimeout:                10 * time.Second,
-		ContinuousTargetOccurence: 3,
-		NotFoundChecks:            20,
-	}
-
-	_, err := stateConf.WaitForStateContext(ctx)
-
-	return err
-}
diff --git a/website/docs/r/s3_bucket_logging.html.markdown b/website/docs/r/s3_bucket_logging.html.markdown
index 995ae8fae92..69b9a42af11 100644
--- a/website/docs/r/s3_bucket_logging.html.markdown
+++ b/website/docs/r/s3_bucket_logging.html.markdown
@@ -52,6 +52,7 @@ This resource supports the following arguments:
 * `target_bucket` - (Required) Name of the bucket where you want Amazon S3 to store server access logs.
 * `target_prefix` - (Required) Prefix for all log object keys.
 * `target_grant` - (Optional) Set of configuration blocks with information for granting permissions. [See below](#target_grant).
+* `target_object_key_format` - (Optional) Amazon S3 key format for log objects. [See below](#target_object_key_format).
 
 ### target_grant
 
@@ -69,6 +70,19 @@ The `grantee` configuration block supports the following arguments:
 * `type` - (Required) Type of grantee. Valid values: `CanonicalUser`, `AmazonCustomerByEmail`, `Group`.
 * `uri` - (Optional) URI of the grantee group.
 
+### target_object_key_format
+
+The `target_object_key_format` configuration block supports the following arguments:
+
+* `partitioned_prefix` - (Optional) Partitioned S3 key for log objects. [See below](#partitioned_prefix).
+* `simple_prefix` - (Optional) Use the simple format for S3 keys for log objects. To use, set `simple_prefix {}`.
+
+### partitioned_prefix
+
+The `partitioned_prefix` configuration block supports the following arguments:
+
+* `partition_date_source` - (Required) Specifies the partition date source for the partitioned prefix. Valid values: `EventTime`, `DeliveryTime`.
+
 ## Attribute Reference
 
 This resource exports the following attributes in addition to the arguments above:
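Taken together, the new arguments nest one level deep. A minimal usage sketch, assuming `aws_s3_bucket.example` and `aws_s3_bucket.log_bucket` are defined elsewhere (the resource names and prefix are placeholders, not part of this change):

```terraform
resource "aws_s3_bucket_logging" "example" {
  bucket        = aws_s3_bucket.example.id
  target_bucket = aws_s3_bucket.log_bucket.id
  target_prefix = "log/"

  # Deliver logs under date-based prefixes derived from the event time;
  # use "DeliveryTime" to partition by delivery time instead, or swap the
  # block for `simple_prefix {}` to keep the flat key format.
  target_object_key_format {
    partitioned_prefix {
      partition_date_source = "EventTime"
    }
  }
}
```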