
Commit

Merge pull request #5507 from terraform-providers/f-aws_rds_cluster-engine_mode

resource/aws_rds_cluster: Add engine_mode argument
bflad authored Aug 13, 2018
2 parents abbbb85 + aba2998 commit 1a97f04
Showing 3 changed files with 175 additions and 5 deletions.
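
For context, a minimal configuration exercising the new argument might look like the sketch below. It mirrors the `testAccAWSRDSClusterConfig_EngineMode` fixture added in this pull request; the resource name, identifier, and credentials are illustrative placeholders rather than values taken from the commit.

```hcl
# Aurora Serverless cluster using the new engine_mode argument.
# Identifier and credentials are illustrative placeholders.
resource "aws_rds_cluster" "example" {
  cluster_identifier  = "example-serverless"
  engine_mode         = "serverless"
  master_username     = "foo"
  master_password     = "barbarbarbar"
  skip_final_snapshot = true
}
```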
42 changes: 38 additions & 4 deletions aws/resource_aws_rds_cluster.go
@@ -120,6 +120,17 @@ func resourceAwsRDSCluster() *schema.Resource {
ValidateFunc: validateRdsEngine(),
},

"engine_mode": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "provisioned",
ValidateFunc: validation.StringInSlice([]string{
"provisioned",
"serverless",
}, false),
},

"engine_version": {
Type: schema.TypeString,
Optional: true,
@@ -130,8 +141,18 @@ func resourceAwsRDSCluster() *schema.Resource {
"storage_encrypted": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
// Allow configuration to be unset when using engine_mode serverless, as it's required to be true
// InvalidParameterCombination: Aurora Serverless DB clusters are always encrypted at rest. Encryption can't be disabled.
if d.Get("engine_mode").(string) != "serverless" {
return false
}
if new != "false" {
return false
}
return true
},
},

"s3_import": {
@@ -355,6 +376,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
opts := rds.RestoreDBClusterFromSnapshotInput{
DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
Engine: aws.String(d.Get("engine").(string)),
EngineMode: aws.String(d.Get("engine_mode").(string)),
SnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)),
Tags: tags,
}
@@ -457,7 +479,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts := &rds.CreateDBClusterInput{
DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
Engine: aws.String(d.Get("engine").(string)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
EngineMode: aws.String(d.Get("engine_mode").(string)),
ReplicationSourceIdentifier: aws.String(d.Get("replication_source_identifier").(string)),
Tags: tags,
}
@@ -512,6 +534,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.SourceRegion = aws.String(attr.(string))
}

if attr, ok := d.GetOkExists("storage_encrypted"); ok {
createOpts.StorageEncrypted = aws.Bool(attr.(bool))
}

if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(attr.([]interface{})) > 0 {
createOpts.EnableCloudwatchLogsExports = expandStringList(attr.([]interface{}))
}
@@ -553,7 +579,6 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
S3Prefix: aws.String(s3_bucket["bucket_prefix"].(string)),
SourceEngine: aws.String(s3_bucket["source_engine"].(string)),
SourceEngineVersion: aws.String(s3_bucket["source_engine_version"].(string)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
Tags: tags,
}

@@ -615,6 +640,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.EnableCloudwatchLogsExports = expandStringList(attr.([]interface{}))
}

if attr, ok := d.GetOkExists("storage_encrypted"); ok {
createOpts.StorageEncrypted = aws.Bool(attr.(bool))
}

log.Printf("[DEBUG] RDS Cluster restore options: %s", createOpts)
// Retry for IAM/S3 eventual consistency
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
@@ -654,9 +683,9 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts := &rds.CreateDBClusterInput{
DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
Engine: aws.String(d.Get("engine").(string)),
EngineMode: aws.String(d.Get("engine_mode").(string)),
MasterUserPassword: aws.String(d.Get("master_password").(string)),
MasterUsername: aws.String(d.Get("master_username").(string)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
Tags: tags,
}

@@ -718,6 +747,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.EnableCloudwatchLogsExports = expandStringList(attr.([]interface{}))
}

if attr, ok := d.GetOkExists("storage_encrypted"); ok {
createOpts.StorageEncrypted = aws.Bool(attr.(bool))
}

log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
var resp *rds.CreateDBClusterOutput
err := resource.Retry(1*time.Minute, func() *resource.RetryError {
@@ -830,6 +863,7 @@ func flattenAwsRdsClusterResource(d *schema.ResourceData, meta interface{}, dbc
d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup)
d.Set("db_subnet_group_name", dbc.DBSubnetGroup)
d.Set("endpoint", dbc.Endpoint)
d.Set("engine_mode", dbc.EngineMode)
d.Set("engine_version", dbc.EngineVersion)
d.Set("engine", dbc.Engine)
d.Set("hosted_zone_id", dbc.HostedZoneId)
133 changes: 133 additions & 0 deletions aws/resource_aws_rds_cluster_test.go
@@ -410,6 +410,60 @@ func TestAccAWSRDSCluster_iamAuth(t *testing.T) {
})
}

func TestAccAWSRDSCluster_EngineMode(t *testing.T) {
var dbCluster1, dbCluster2 rds.DBCluster

rName := acctest.RandomWithPrefix("tf-acc-test")
resourceName := "aws_rds_cluster.test"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSRDSClusterConfig_EngineMode(rName, "serverless"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSClusterExists(resourceName, &dbCluster1),
resource.TestCheckResourceAttr(resourceName, "engine_mode", "serverless"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"apply_immediately",
"cluster_identifier_prefix",
"master_password",
"skip_final_snapshot",
"snapshot_identifier",
},
},
{
Config: testAccAWSRDSClusterConfig_EngineMode(rName, "provisioned"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSClusterExists(resourceName, &dbCluster2),
testAccCheckAWSClusterRecreated(&dbCluster1, &dbCluster2),
resource.TestCheckResourceAttr(resourceName, "engine_mode", "provisioned"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"apply_immediately",
"cluster_identifier_prefix",
"master_password",
"skip_final_snapshot",
"snapshot_identifier",
},
},
},
})
}

func TestAccAWSRDSCluster_EngineVersion(t *testing.T) {
var dbCluster rds.DBCluster
rInt := acctest.RandInt()
@@ -487,6 +541,50 @@ func TestAccAWSRDSCluster_SnapshotIdentifier(t *testing.T) {
})
}

func TestAccAWSRDSCluster_SnapshotIdentifier_EngineMode(t *testing.T) {
// NOTE: As of August 10, 2018: Attempting to create a serverless cluster
// from snapshot currently leaves those clusters stuck in "creating"
// for upwards of a few hours. AWS likely needs to resolve something
// upstream or provide a helpful error. This test is left here to
// potentially be updated in the future if that issue is resolved.
var dbCluster, sourceDbCluster rds.DBCluster
var dbClusterSnapshot rds.DBClusterSnapshot

rName := acctest.RandomWithPrefix("tf-acc-test")
sourceDbResourceName := "aws_rds_cluster.source"
snapshotResourceName := "aws_db_cluster_snapshot.test"
resourceName := "aws_rds_cluster.test"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSRDSClusterConfig_SnapshotIdentifier_EngineMode(rName, "provisioned"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSClusterExists(sourceDbResourceName, &sourceDbCluster),
testAccCheckDbClusterSnapshotExists(snapshotResourceName, &dbClusterSnapshot),
testAccCheckAWSClusterExists(resourceName, &dbCluster),
resource.TestCheckResourceAttr(resourceName, "engine_mode", "provisioned"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"apply_immediately",
"cluster_identifier_prefix",
"master_password",
"skip_final_snapshot",
"snapshot_identifier",
},
},
},
})
}

func TestAccAWSRDSCluster_SnapshotIdentifier_Tags(t *testing.T) {
var dbCluster, sourceDbCluster rds.DBCluster
var dbClusterSnapshot rds.DBClusterSnapshot
@@ -1471,6 +1569,18 @@ resource "aws_rds_cluster" "test_replica" {
`, n)
}

func testAccAWSRDSClusterConfig_EngineMode(rName, engineMode string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
cluster_identifier = %q
engine_mode = %q
master_password = "barbarbarbar"
master_username = "foo"
skip_final_snapshot = true
}
`, rName, engineMode)
}

func testAccAWSRDSClusterConfig_SnapshotIdentifier(rName string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "source" {
@@ -1493,6 +1603,29 @@ resource "aws_rds_cluster" "test" {
`, rName, rName, rName)
}

func testAccAWSRDSClusterConfig_SnapshotIdentifier_EngineMode(rName, engineMode string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "source" {
cluster_identifier = "%s-source"
master_password = "barbarbarbar"
master_username = "foo"
skip_final_snapshot = true
}
resource "aws_db_cluster_snapshot" "test" {
db_cluster_identifier = "${aws_rds_cluster.source.id}"
db_cluster_snapshot_identifier = %q
}
resource "aws_rds_cluster" "test" {
cluster_identifier = %q
engine_mode = %q
skip_final_snapshot = true
snapshot_identifier = "${aws_db_cluster_snapshot.test.id}"
}
`, rName, rName, rName, engineMode)
}

func testAccAWSRDSClusterConfig_SnapshotIdentifier_Tags(rName string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "source" {
5 changes: 4 additions & 1 deletion website/docs/r/rds_cluster.html.markdown
@@ -107,7 +107,7 @@ Default: A 30-minute window selected at random from an 8-hour block of time per
* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate
with the Cluster
* `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot.
* `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false` if not specified.
* `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false` for `provisioned` `engine_mode` and `true` for `serverless` `engine_mode`.
* `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica.
* `apply_immediately` - (Optional) Specifies whether any cluster modifications
are applied immediately, or during the next maintenance window. Default is
@@ -118,6 +118,7 @@ Default: A 30-minute window selected at random from an 8-hour block of time per
* `iam_roles` - (Optional) A List of ARNs for the IAM roles to associate to the RDS Cluster.
* `iam_database_authentication_enabled` - (Optional) Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database accounts are enabled.
* `engine` - (Optional) The name of the database engine to be used for this DB cluster. Defaults to `aurora`. Valid Values: `aurora`, `aurora-mysql`, `aurora-postgresql`
* `engine_mode` - (Optional) The database engine mode. Valid values: `provisioned`, `serverless`. Defaults to `provisioned`. See the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/aurora-serverless.html) for limitations when using `serverless`; a short example sketch follows this argument list.
* `engine_version` - (Optional) The database engine version.
* `source_region` - (Optional) The source region for an encrypted replica DB cluster.
* `enabled_cloudwatch_logs_exports` - (Optional) List of log types to export to cloudwatch. If omitted, no logs will be exported.
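
Because an Aurora Serverless cluster is always encrypted at rest, `storage_encrypted` can be left unset when `engine_mode` is `serverless`, while a `provisioned` cluster must request encryption explicitly. The sketch below contrasts the two; resource names, identifiers, and credentials are placeholders and assume nothing beyond the arguments documented above.

```hcl
# Provisioned cluster: encryption must be requested explicitly.
resource "aws_rds_cluster" "provisioned" {
  cluster_identifier  = "example-provisioned"
  engine_mode         = "provisioned"
  master_username     = "foo"
  master_password     = "barbarbarbar"
  storage_encrypted   = true
  skip_final_snapshot = true
}

# Serverless cluster: always encrypted at rest, so storage_encrypted is omitted.
resource "aws_rds_cluster" "serverless" {
  cluster_identifier  = "example-serverless"
  engine_mode         = "serverless"
  master_username     = "foo"
  master_password     = "barbarbarbar"
  skip_final_snapshot = true
}
```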
@@ -128,6 +129,8 @@ Default: A 30-minute window selected at random from an 8-hour block of time per

Full details on the core parameters and impacts are in the API Docs: [RestoreDBClusterFromS3](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBClusterFromS3.html). Requires that the S3 bucket be in the same region as the RDS cluster you're trying to create. Sample:

~> **NOTE:** RDS Aurora Serverless does not support loading data from S3, so it's not possible to use `engine_mode` set to `serverless` with `s3_import`.

```hcl
resource "aws_rds_cluster" "db" {
engine = "aurora"
