diff --git a/aws/data_source_aws_s3_bucket_object.go b/aws/data_source_aws_s3_bucket_object.go index 5be6586d0b8..0f213cf86a2 100644 --- a/aws/data_source_aws_s3_bucket_object.go +++ b/aws/data_source_aws_s3_bucket_object.go @@ -74,6 +74,18 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeMap, Computed: true, }, + "object_lock_legal_hold_status": { + Type: schema.TypeString, + Computed: true, + }, + "object_lock_mode": { + Type: schema.TypeString, + Computed: true, + }, + "object_lock_retain_until_date": { + Type: schema.TypeString, + Computed: true, + }, "range": { Type: schema.TypeString, Optional: true, @@ -155,6 +167,9 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("expires", out.Expires) d.Set("last_modified", out.LastModified.Format(time.RFC1123)) d.Set("metadata", pointersMapToStringList(out.Metadata)) + d.Set("object_lock_legal_hold_status", out.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", out.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(out.ObjectLockRetainUntilDate)) d.Set("server_side_encryption", out.ServerSideEncryption) d.Set("sse_kms_key_id", out.SSEKMSKeyId) d.Set("version_id", out.VersionId) diff --git a/aws/data_source_aws_s3_bucket_object_test.go b/aws/data_source_aws_s3_bucket_object_test.go index a547fbf7303..15532d17c1b 100644 --- a/aws/data_source_aws_s3_bucket_object_test.go +++ b/aws/data_source_aws_s3_bucket_object_test.go @@ -4,6 +4,7 @@ import ( "fmt" "regexp" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -39,6 +40,9 @@ func TestAccDataSourceAWSS3BucketObject_basic(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + 
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), ), }, @@ -73,6 +77,9 @@ func TestAccDataSourceAWSS3BucketObject_readableBody(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "a6105c0a611b41b08f1209506350279e"), resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "yes"), ), }, @@ -110,6 +117,9 @@ func TestAccDataSourceAWSS3BucketObject_kmsEncrypted(t *testing.T) { regexp.MustCompile(`^arn:aws:kms:[a-z]{2}-[a-z]+-\d{1}:[0-9]{12}:key/[a-z0-9-]{36}$`)), resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "Keep Calm and Carry On"), ), }, @@ -161,6 +171,84 @@ func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) { 
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "website_redirect_location", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "tags.%", "1"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + +func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHoldOff(t *testing.T) { + rInt := acctest.RandInt() + resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOff(rInt) + + var rObj s3.GetObjectOutput + var dsObj s3.GetObjectOutput + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: resourceOnlyConf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), + ), + }, + { + Config: conf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), + resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", + regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "OFF"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", 
"object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), + resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), + ), + }, + }, + }) +} + +func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHoldOn(t *testing.T) { + rInt := acctest.RandInt() + retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) + resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOn(rInt, retainUntilDate) + + var rObj s3.GetObjectOutput + var dsObj s3.GetObjectOutput + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: resourceOnlyConf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), + ), + }, + { + Config: conf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), + resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", + regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "ON"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", retainUntilDate), + resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), ), }, }, @@ -303,3 +391,68 @@ data 
"aws_s3_bucket_object" "obj" { return resources, both } + +func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOff(randInt int) (string, string) { + resources := fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "tf-testing-obj-%d" + content = "Hello World" + object_lock_legal_hold_status = "OFF" +} +`, randInt, randInt) + + both := fmt.Sprintf(`%s +data "aws_s3_bucket_object" "obj" { + bucket = "tf-object-test-bucket-%d" + key = "tf-testing-obj-%d" +} +`, resources, randInt, randInt) + + return resources, both +} + +func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOn(randInt int, retainUntilDate string) (string, string) { + resources := fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "tf-testing-obj-%d" + content = "Hello World" + force_destroy = true + object_lock_legal_hold_status = "ON" + object_lock_mode = "GOVERNANCE" + object_lock_retain_until_date = "%s" +} +`, randInt, randInt, retainUntilDate) + + both := fmt.Sprintf(`%s +data "aws_s3_bucket_object" "obj" { + bucket = "tf-object-test-bucket-%d" + key = "tf-testing-obj-%d" +} +`, resources, randInt, randInt) + + return resources, both +} diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 49c73881d6c..cdf90113c5a 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -567,8 +567,8 @@ func resourceAwsS3Bucket() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - 
s3.ObjectLockModeGovernance, - s3.ObjectLockModeCompliance, + s3.ObjectLockRetentionModeGovernance, + s3.ObjectLockRetentionModeCompliance, }, false), }, @@ -1259,49 +1259,17 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { // bucket may have things delete them log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) - bucket := d.Get("bucket").(string) - resp, err := s3conn.ListObjectVersions( - &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - }, - ) - - if err != nil { - return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) - } - - objectsToDelete := make([]*s3.ObjectIdentifier, 0) - - if len(resp.DeleteMarkers) != 0 { - - for _, v := range resp.DeleteMarkers { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } + // Delete everything including locked objects. + // Don't ignore any object errors or we could recurse infinitely. + var objectLockEnabled bool + objectLockConfiguration := expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) + if objectLockConfiguration != nil { + objectLockEnabled = aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled } - - if len(resp.Versions) != 0 { - for _, v := range resp.Versions { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } - } - - params := &s3.DeleteObjectsInput{ - Bucket: aws.String(bucket), - Delete: &s3.Delete{ - Objects: objectsToDelete, - }, - } - - _, err = s3conn.DeleteObjects(params) + err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", objectLockEnabled, false) if err != nil { - return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) + return fmt.Errorf("error S3 Bucket force_destroy: %s", err) } // this line recurses until all objects are deleted or an error is returned @@ -2472,7 +2440,7 @@ type S3Website struct { // S3 
Object Lock functions. // -func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) (interface{}, error) { +func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) ([]interface{}, error) { resp, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return conn.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{ Bucket: aws.String(bucket), diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index 5a2d3e37951..81c3d64841a 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -9,6 +9,7 @@ import ( "net/url" "os" "strings" + "time" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" @@ -162,6 +163,36 @@ func resourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "object_lock_legal_hold_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + s3.ObjectLockLegalHoldStatusOn, + s3.ObjectLockLegalHoldStatusOff, + }, false), + }, + + "object_lock_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + s3.ObjectLockModeGovernance, + s3.ObjectLockModeCompliance, + }, false), + }, + + "object_lock_retain_until_date": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.ValidateRFC3339TimeString, + }, }, } } @@ -263,6 +294,18 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput.WebsiteRedirectLocation = aws.String(v.(string)) } + if v, ok := d.GetOk("object_lock_legal_hold_status"); ok { + putInput.ObjectLockLegalHoldStatus = aws.String(v.(string)) + } + + if v, ok := d.GetOk("object_lock_mode"); ok { + putInput.ObjectLockMode = aws.String(v.(string)) + } + + if v, ok := 
d.GetOk("object_lock_retain_until_date"); ok { + putInput.ObjectLockRetainUntilDate = expandS3ObjectLockRetainUntilDate(v.(string)) + } + if _, err := s3conn.PutObject(putInput); err != nil { return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err) } @@ -317,6 +360,9 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err d.Set("version_id", resp.VersionId) d.Set("server_side_encryption", resp.ServerSideEncryption) d.Set("website_redirect", resp.WebsiteRedirectLocation) + d.Set("object_lock_legal_hold_status", resp.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", resp.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(resp.ObjectLockRetainUntilDate)) // Only set non-default KMS key ID (one that doesn't match default) if resp.SSEKMSKeyId != nil { @@ -387,6 +433,45 @@ func resourceAwsS3BucketObjectUpdate(d *schema.ResourceData, meta interface{}) e } } + if d.HasChange("object_lock_legal_hold_status") { + _, err := conn.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(d.Get("key").(string)), + LegalHold: &s3.ObjectLockLegalHold{ + Status: aws.String(d.Get("object_lock_legal_hold_status").(string)), + }, + }) + if err != nil { + return fmt.Errorf("error putting S3 object lock legal hold: %s", err) + } + } + + if d.HasChange("object_lock_mode") || d.HasChange("object_lock_retain_until_date") { + req := &s3.PutObjectRetentionInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(d.Get("key").(string)), + Retention: &s3.ObjectLockRetention{ + Mode: aws.String(d.Get("object_lock_mode").(string)), + RetainUntilDate: expandS3ObjectLockRetainUntilDate(d.Get("object_lock_retain_until_date").(string)), + }, + } + + // Bypass required to lower or clear retain-until date. 
+ if d.HasChange("object_lock_retain_until_date") { + oraw, nraw := d.GetChange("object_lock_retain_until_date") + o := expandS3ObjectLockRetainUntilDate(oraw.(string)) + n := expandS3ObjectLockRetainUntilDate(nraw.(string)) + if n == nil || (o != nil && n.Before(*o)) { + req.BypassGovernanceRetention = aws.Bool(true) + } + } + + _, err := conn.PutObjectRetention(req) + if err != nil { + return fmt.Errorf("error putting S3 object lock retention: %s", err) + } + } + if err := setTagsS3Object(conn, d); err != nil { return fmt.Errorf("error setting S3 object tags: %s", err) } @@ -402,39 +487,8 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e // We are effectively ignoring any leading '/' in the key name as aws.Config.DisableRestProtocolURICleaning is false key = strings.TrimPrefix(key, "/") - if _, ok := d.GetOk("version_id"); ok { - // Bucket is versioned, we need to delete all versions - vInput := s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(key), - } - out, err := s3conn.ListObjectVersions(&vInput) - if err != nil { - return fmt.Errorf("Failed listing S3 object versions: %s", err) - } - - for _, v := range out.Versions { - input := s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - VersionId: v.VersionId, - } - _, err := s3conn.DeleteObject(&input) - if err != nil { - return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s", - key, v, err) - } - } - } else { - // Just delete the object - input := s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - _, err := s3conn.DeleteObject(&input) - if err != nil { - return fmt.Errorf("Error deleting S3 bucket object: %s Bucket: %q Object: %q", err, bucket, key) - } + if err := deleteAllS3ObjectVersions(s3conn, bucket, key, d.Get("force_destroy").(bool), false); err != nil { + return fmt.Errorf("error deleting S3 Bucket (%s) Object (%s): %s", bucket, key, err) } return nil @@ -459,3 
+513,184 @@ func resourceAwsS3BucketObjectCustomizeDiff(d *schema.ResourceDiff, meta interfa return nil } + +// deleteAllS3ObjectVersions deletes all versions of a specified key from an S3 bucket. +// If key is empty then all versions of all objects are deleted. +// Set force to true to override any S3 object lock protections on object lock enabled buckets. +func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) error { + input := &s3.ListObjectVersionsInput{ + Bucket: aws.String(bucketName), + } + if key != "" { + input.Prefix = aws.String(key) + } + + var lastErr error + err := conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, objectVersion := range page.Versions { + objectKey := aws.StringValue(objectVersion.Key) + objectVersionID := aws.StringValue(objectVersion.VersionId) + + if key != "" && key != objectKey { + continue + } + + err := deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID, force) + if isAWSErr(err, "AccessDenied", "") && force { + // Remove any legal hold. 
+ resp, err := conn.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + }) + + if err != nil { + log.Printf("[ERROR] Error getting S3 Bucket (%s) Object (%s) Version (%s) metadata: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + if aws.StringValue(resp.ObjectLockLegalHoldStatus) == s3.ObjectLockLegalHoldStatusOn { + _, err := conn.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + LegalHold: &s3.ObjectLockLegalHold{ + Status: aws.String(s3.ObjectLockLegalHoldStatusOff), + }, + }) + + if err != nil { + log.Printf("[ERROR] Error putting S3 Bucket (%s) Object (%s) Version(%s) legal hold: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + // Attempt to delete again. + err = deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID, force) + + if err != nil { + lastErr = err + } + + continue + } + + // AccessDenied for another reason. 
+ lastErr = fmt.Errorf("AccessDenied deleting S3 Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, objectVersionID) + continue + } + + if err != nil { + lastErr = err + } + } + + return !lastPage + }) + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + err = nil + } + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object version, last error: %s", lastErr) + } + + lastErr = nil + } + + err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, deleteMarker := range page.DeleteMarkers { + deleteMarkerKey := aws.StringValue(deleteMarker.Key) + deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) + + if key != "" && key != deleteMarkerKey { + continue + } + + // Delete markers have no object lock protections. + err := deleteS3ObjectVersion(conn, bucketName, deleteMarkerKey, deleteMarkerVersionID, false) + + if err != nil { + lastErr = err + } + } + + return !lastPage + }) + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + err = nil + } + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object delete marker, last error: %s", lastErr) + } + + lastErr = nil + } + + return nil +} + +// deleteS3ObjectVersion deletes a specific bucket object version. +// Set force to true to override any S3 object lock protections. 
+func deleteS3ObjectVersion(conn *s3.S3, b, k, v string, force bool) error { + input := &s3.DeleteObjectInput{ + Bucket: aws.String(b), + Key: aws.String(k), + VersionId: aws.String(v), + } + if force { + input.BypassGovernanceRetention = aws.Bool(true) + } + + log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", b, k, v) + _, err := conn.DeleteObject(input) + + if err != nil { + log.Printf("[WARN] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) + } + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { + return nil + } + + return err +} + +func expandS3ObjectLockRetainUntilDate(v string) *time.Time { + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return nil + } + + return aws.Time(t) +} + +func flattenS3ObjectLockRetainUntilDate(t *time.Time) string { + if t == nil { + return "" + } + + return t.Format(time.RFC3339) +} diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index c733339a3e9..8b8bb0175a9 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -11,6 +11,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -81,80 +82,16 @@ func testSweepS3BucketObjects(region string) error { continue } - input := &s3.ListObjectVersionsInput{ - Bucket: bucket.Name, - } - - err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, objectVersion := range page.Versions { - input := &s3.DeleteObjectInput{ - Bucket: bucket.Name, - Key: objectVersion.Key, - VersionId: objectVersion.VersionId, - } - objectKey := aws.StringValue(objectVersion.Key) - objectVersionID := aws.StringValue(objectVersion.VersionId) - - log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, 
objectVersionID) - _, err := conn.DeleteObject(input) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { - continue - } - - if err != nil { - log.Printf("[ERROR] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", bucketName, objectKey, objectVersionID, err) - } - } - - return !lastPage - }) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { - continue - } + objectLockEnabled, err := testS3BucketObjectLockEnabled(conn, bucketName) if err != nil { - return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucketName, err) - } - - err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, deleteMarker := range page.DeleteMarkers { - input := &s3.DeleteObjectInput{ - Bucket: bucket.Name, - Key: deleteMarker.Key, - VersionId: deleteMarker.VersionId, - } - deleteMarkerKey := aws.StringValue(deleteMarker.Key) - deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) - - log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Delete Marker: %s", bucketName, deleteMarkerKey, deleteMarkerVersionID) - _, err := conn.DeleteObject(input) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { - continue - } - - if err != nil { - log.Printf("[ERROR] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", bucketName, deleteMarkerKey, deleteMarkerVersionID, err) - } - } - - return !lastPage - }) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + log.Printf("[ERROR] Error getting S3 Bucket (%s) Object Lock: %s", bucketName, err) continue } + // Delete everything including locked objects. Ignore any object errors. 
+ err = deleteAllS3ObjectVersions(conn, bucketName, "", objectLockEnabled, true) + if err != nil { return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucketName, err) } @@ -346,6 +283,9 @@ func TestAccAWSS3BucketObject_updates(t *testing.T) { testAccCheckAWSS3BucketObjectExists(resourceName, &originalObj), testAccCheckAWSS3BucketObjectBody(&originalObj, "initial object state"), resource.TestCheckResourceAttr(resourceName, "etag", "647d1d58e1011c743ec67d5e8af87b53"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, { @@ -354,6 +294,9 @@ func TestAccAWSS3BucketObject_updates(t *testing.T) { testAccCheckAWSS3BucketObjectExists(resourceName, &modifiedObj), testAccCheckAWSS3BucketObjectBody(&modifiedObj, "modified object"), resource.TestCheckResourceAttr(resourceName, "etag", "1c7fd13df1515c2a13ad9eb068931f09"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, }, @@ -759,6 +702,196 @@ func TestAccAWSS3BucketObject_tagsLeadingSlash(t *testing.T) { }) } +func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { + var obj1, obj2, obj3 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockLegalHold(rInt, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, 
"stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "ON"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + // Remove legal hold but create a new object version to test force_destroy + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "changed stuff", "OFF"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithOn(t *testing.T) { + var obj1, obj2 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "ON"), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "OFF"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_ObjectLockRetentionStartWithNone(t *testing.T) { + var obj1, obj2, obj3 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockRetention(rInt, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + { + Config: 
testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate), + ), + }, + // Remove retention period but create a new object version to test force_destroy + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockRetention(rInt, "changed stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_ObjectLockRetentionStartWithSet(t *testing.T) { + var obj1, obj2, obj3, obj4 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + retainUntilDate1 := time.Now().UTC().AddDate(0, 0, 20).Format(time.RFC3339) + retainUntilDate2 := time.Now().UTC().AddDate(0, 0, 30).Format(time.RFC3339) + retainUntilDate3 := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate1), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate1), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate2), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate3), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate3), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockRetention(rInt, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj4, &obj3), + testAccCheckAWSS3BucketObjectBody(&obj4, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + 
resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + func testAccCheckAWSS3BucketObjectVersionIdDiffers(first, second *s3.GetObjectOutput) resource.TestCheckFunc { return func(s *terraform.State) error { if first.VersionId == nil { @@ -1227,3 +1360,90 @@ resource "aws_s3_bucket_object" "object" { } `, randInt, metadataKey1, metadataValue1, metadataKey2, metadataValue2) } + +func testAccAWSS3BucketObjectConfig_noObjectLockLegalHold(randInt int, content string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + force_destroy = true +} +`, randInt, content) +} + +func testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(randInt int, content, legalHoldStatus string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + object_lock_legal_hold_status = "%s" + force_destroy = true +} +`, randInt, content, legalHoldStatus) +} + +func testAccAWSS3BucketObjectConfig_noObjectLockRetention(randInt int, content string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + 
content = "%s" + force_destroy = true +} +`, randInt, content) +} + +func testAccAWSS3BucketObjectConfig_withObjectLockRetention(randInt int, content, retainUntilDate string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + force_destroy = true + object_lock_mode = "GOVERNANCE" + object_lock_retain_until_date = "%s" +} +`, randInt, content, retainUntilDate) +} diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index 008ca78690f..2437165bff9 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -136,6 +136,24 @@ func testS3BucketRegion(conn *s3.S3, bucket string) (string, error) { return aws.StringValue(output.LocationConstraint), nil } +func testS3BucketObjectLockEnabled(conn *s3.S3, bucket string) (bool, error) { + input := &s3.GetObjectLockConfigurationInput{ + Bucket: aws.String(bucket), + } + + output, err := conn.GetObjectLockConfiguration(input) + + if isAWSErr(err, "ObjectLockConfigurationNotFoundError", "") { + return false, nil + } + + if err != nil { + return false, err + } + + return aws.StringValue(output.ObjectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled, nil +} + func TestAccAWSS3Bucket_basic(t *testing.T) { rInt := acctest.RandInt() arnRegexp := regexp.MustCompile(`^arn:aws[\w-]*:s3:::`) @@ -1884,6 +1902,48 @@ func TestAccAWSS3Bucket_objectLock(t *testing.T) { }) } +func TestAccAWSS3Bucket_forceDestroy(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rInt := acctest.RandInt() + bucketName := fmt.Sprintf("tf-test-bucket-%d", rInt) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfig_forceDestroy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + testAccCheckAWSS3BucketAddObjects(resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + +func TestAccAWSS3Bucket_forceDestroyWithObjectLockEnabled(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rInt := acctest.RandInt() + bucketName := fmt.Sprintf("tf-test-bucket-%d", rInt) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfig_forceDestroyWithObjectLockEnabled(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + testAccCheckAWSS3BucketAddObjectsWithLegalHold(resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + func TestAWSS3BucketName(t *testing.T) { validDnsNames := []string{ "foobar", @@ -2098,6 +2158,47 @@ func testAccCheckAWSS3DestroyBucket(n string) resource.TestCheckFunc { } } +func testAccCheckAWSS3BucketAddObjects(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := testAccProvider.Meta().(*AWSClient).s3conn + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return nil + } +} + +func testAccCheckAWSS3BucketAddObjectsWithLegalHold(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := testAccProvider.Meta().(*AWSClient).s3conn + + for _, key := range keys { + _, err := 
conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + ObjectLockLegalHoldStatus: aws.String(s3.ObjectLockLegalHoldStatusOn), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return nil + } +} + // Create an S3 bucket via a CF stack so that it has system tags. func testAccCheckAWSS3BucketCreateViaCloudFormation(n string, stackId *string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -3697,6 +3798,34 @@ resource "aws_s3_bucket" "arbitrary" { `, randInt) } +func testAccAWSS3BucketConfig_forceDestroy(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = "%s" + acl = "private" + force_destroy = true +} +`, bucketName) +} + +func testAccAWSS3BucketConfig_forceDestroyWithObjectLockEnabled(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = "%s" + acl = "private" + force_destroy = true + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +`, bucketName) +} + const testAccAWSS3BucketConfigBucketEmptyString = ` resource "aws_s3_bucket" "test" { bucket = "" diff --git a/website/docs/d/s3_bucket_object.html.markdown b/website/docs/d/s3_bucket_object.html.markdown index d4f789e7733..d22772d65d8 100644 --- a/website/docs/d/s3_bucket_object.html.markdown +++ b/website/docs/d/s3_bucket_object.html.markdown @@ -77,6 +77,9 @@ In addition to all arguments above, the following attributes are exported: * `expires` - The date and time at which the object is no longer cacheable. * `last_modified` - Last modified date of the object in RFC1123 format (e.g. `Mon, 02 Jan 2006 15:04:05 MST`) * `metadata` - A map of metadata stored with the object in S3 +* `object_lock_legal_hold_status` - Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). 
This field is only returned if you have permission to view an object's legal hold status. +* `object_lock_mode` - The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) currently in place for this object. +* `object_lock_retain_until_date` - The date and time when this object's object lock will expire. * `server_side_encryption` - If the object is stored using server-side encryption (KMS or Amazon S3-managed encryption key), this field includes the chosen encryption and algorithm used. * `sse_kms_key_id` - If present, specifies the ID of the Key Management Service (KMS) master encryption key that was used for the object. * `storage_class` - [Storage class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) information of the object. Available for all objects except for `Standard` storage class objects. diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 94b9bdde032..87d1844ba9c 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -320,7 +320,7 @@ The following arguments are supported: * `policy` - (Optional) A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a `terraform plan`. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](/docs/providers/aws/guides/iam-policy-documents.html). * `tags` - (Optional) A mapping of tags to assign to the bucket. -* `force_destroy` - (Optional, Default:false ) A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. 
These objects are *not* recoverable. +* `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. * `website` - (Optional) A website object (documented below). * `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below). * `versioning` - (Optional) A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below) diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index 9944a564485..322f8a8b990 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -80,6 +80,35 @@ resource "aws_s3_bucket_object" "examplebucket_object" { } ``` +### S3 Object Lock + +```hcl +resource "aws_s3_bucket" "examplebucket" { + bucket = "examplebuckettftest" + acl = "private" + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "examplebucket_object" { + key = "someobject" + bucket = "${aws_s3_bucket.examplebucket.id}" + source = "important.txt" + + object_lock_legal_hold_status = "ON" + object_lock_mode = "GOVERNANCE" + object_lock_retain_until_date = "2021-12-31T23:59:59Z" + + force_destroy = true +} +``` + ## Argument Reference -> **Note:** If you specify `content_encoding` you are responsible for encoding the body appropriately. `source`, `content`, and `content_base64` all expect already encoded/compressed bytes. 
@@ -109,6 +138,11 @@ use the exported `arn` attribute: `kms_key_id = "${aws_kms_key.foo.arn}"` * `metadata` - (Optional) A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API). * `tags` - (Optional) A mapping of tags to assign to the object. +* `force_destroy` - (Optional) Allow the object to be deleted by removing any legal hold on any object version. +Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled. +* `object_lock_legal_hold_status` - (Optional) The [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`. +* `object_lock_mode` - (Optional) The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. +* `object_lock_retain_until_date` - (Optional) The date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods). If no content is provided through `source`, `content` or `content_base64`, then the object will be empty.