Merge pull request #4741 from terraform-providers/f-aws_elasticache_cluster-preferred_availability_zones

resource/aws_elasticache_cluster: Migrate from availability_zones TypeSet attribute to preferred_availability_zones TypeList attribute
bflad authored Jun 13, 2018
2 parents 7a46d54 + 1fa9fb8 commit 6d0e517
Showing 3 changed files with 129 additions and 78 deletions.
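
For orientation (illustrative, not part of the commit): the change deprecates the `availability_zones` set attribute in favor of an ordered `preferred_availability_zones` list whose length must equal `num_cache_nodes`. A minimal before/after configuration sketch, assuming a cross-AZ Memcached cluster (resource name and zone names are illustrative):

# Before: deprecated set attribute
resource "aws_elasticache_cluster" "example" {
  cluster_id           = "tf-example"
  engine               = "memcached"
  node_type            = "cache.m1.small"
  num_cache_nodes      = 2
  az_mode              = "cross-az"
  parameter_group_name = "default.memcached1.4"
  availability_zones   = ["us-west-2a", "us-west-2b"]
}

# After: ordered list attribute; length must equal num_cache_nodes
resource "aws_elasticache_cluster" "example" {
  cluster_id                   = "tf-example"
  engine                       = "memcached"
  node_type                    = "cache.m1.small"
  num_cache_nodes              = 2
  az_mode                      = "cross-az"
  parameter_group_name         = "default.memcached1.4"
  preferred_availability_zones = ["us-west-2a", "us-west-2b"]
}
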
37 changes: 33 additions & 4 deletions aws/resource_aws_elasticache_cluster.go
@@ -180,6 +180,9 @@ func resourceAwsElasticacheCluster() *schema.Resource {
ForceNew: true,
}

resourceSchema["availability_zones"].ConflictsWith = []string{"preferred_availability_zones"}
resourceSchema["availability_zones"].Deprecated = "Use `preferred_availability_zones` instead"

resourceSchema["configuration_endpoint"] = &schema.Schema{
Type: schema.TypeString,
Computed: true,
@@ -215,6 +218,12 @@ func resourceAwsElasticacheCluster() *schema.Resource {
},
}

resourceSchema["preferred_availability_zones"] = &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
}

resourceSchema["replication_group_id"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -400,10 +409,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
req.PreferredAvailabilityZone = aws.String(v.(string))
}

preferred_azs := d.Get("availability_zones").(*schema.Set).List()
if len(preferred_azs) > 0 {
azs := expandStringList(preferred_azs)
req.PreferredAvailabilityZones = azs
if v, ok := d.GetOk("preferred_availability_zones"); ok && len(v.([]interface{})) > 0 {
req.PreferredAvailabilityZones = expandStringList(v.([]interface{}))
} else {
preferred_azs := d.Get("availability_zones").(*schema.Set).List()
if len(preferred_azs) > 0 {
azs := expandStringList(preferred_azs)
req.PreferredAvailabilityZones = azs
}
}

id, err := createElasticacheCacheCluster(conn, req)
@@ -589,6 +602,22 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
log.Printf("[INFO] Cluster %s is marked for Decreasing cache nodes from %d to %d", d.Id(), o, n)
nodesToRemove := getCacheNodesToRemove(d, o, o-n)
req.CacheNodeIdsToRemove = nodesToRemove
} else {
log.Printf("[INFO] Cluster %s is marked for increasing cache nodes from %d to %d", d.Id(), o, n)
// SDK documentation for NewAvailabilityZones states:
// The list of Availability Zones where the new Memcached cache nodes are created.
//
// This parameter is only valid when NumCacheNodes in the request is greater
// than the sum of the number of active cache nodes and the number of cache
// nodes pending creation (which may be zero). The number of Availability Zones
// supplied in this list must match the cache nodes being added in this request.
if v, ok := d.GetOk("preferred_availability_zones"); ok && len(v.([]interface{})) > 0 {
// Here we check the list length to prevent a potential panic :)
if len(v.([]interface{})) != n {
return fmt.Errorf("length of preferred_availability_zones (%d) must match num_cache_nodes (%d)", len(v.([]interface{})), n)
}
req.NewAvailabilityZones = expandStringList(v.([]interface{})[o:])
}
}

req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
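
Illustrative sketch (not part of the diff): per the update logic above, when increasing the node count the length of `preferred_availability_zones` must equal the new `num_cache_nodes`, and only the trailing entries for the added nodes are sent to the API as `NewAvailabilityZones`. A configuration that scales an existing one-node cluster to three nodes might look like this (zone names are illustrative):

resource "aws_elasticache_cluster" "example" {
  apply_immediately    = true
  cluster_id           = "tf-example"
  engine               = "memcached"
  node_type            = "cache.m1.small"
  num_cache_nodes      = 3 # previously 1
  parameter_group_name = "default.memcached1.4"

  # Three entries to satisfy the length check; on update the provider sends
  # only the last two (the newly added nodes) as NewAvailabilityZones.
  preferred_availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
}
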
164 changes: 92 additions & 72 deletions aws/resource_aws_elasticache_cluster_test.go
@@ -245,35 +245,86 @@ func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
})
}

func TestAccAWSElasticacheCluster_decreasingCacheNodes(t *testing.T) {
func TestAccAWSElasticacheCluster_NumCacheNodes_Decrease(t *testing.T) {
var ec elasticache.CacheCluster
rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(8))
resourceName := "aws_elasticache_cluster.bar"

ri := acctest.RandInt()
preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes, ri, ri, acctest.RandString(10))
postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes_update, ri, ri, acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSElasticacheClusterConfig_NumCacheNodes(rName, 3),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheClusterExists(resourceName, &ec),
resource.TestCheckResourceAttr(resourceName, "num_cache_nodes", "3"),
),
},
{
Config: testAccAWSElasticacheClusterConfig_NumCacheNodes(rName, 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheClusterExists(resourceName, &ec),
resource.TestCheckResourceAttr(resourceName, "num_cache_nodes", "1"),
),
},
},
})
}

func TestAccAWSElasticacheCluster_NumCacheNodes_Increase(t *testing.T) {
var ec elasticache.CacheCluster
rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(8))
resourceName := "aws_elasticache_cluster.bar"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
{
Config: preConfig,
Config: testAccAWSElasticacheClusterConfig_NumCacheNodes(rName, 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "num_cache_nodes", "3"),
testAccCheckAWSElasticacheClusterExists(resourceName, &ec),
resource.TestCheckResourceAttr(resourceName, "num_cache_nodes", "1"),
),
},
{
Config: testAccAWSElasticacheClusterConfig_NumCacheNodes(rName, 3),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheClusterExists(resourceName, &ec),
resource.TestCheckResourceAttr(resourceName, "num_cache_nodes", "3"),
),
},
},
})
}

func TestAccAWSElasticacheCluster_NumCacheNodes_IncreaseWithPreferredAvailabilityZones(t *testing.T) {
var ec elasticache.CacheCluster
rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(8))
resourceName := "aws_elasticache_cluster.bar"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
{
Config: postConfig,
Config: testAccAWSElasticacheClusterConfig_NumCacheNodesWithPreferredAvailabilityZones(rName, 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "num_cache_nodes", "1"),
testAccCheckAWSElasticacheClusterExists(resourceName, &ec),
resource.TestCheckResourceAttr(resourceName, "num_cache_nodes", "1"),
resource.TestCheckResourceAttr(resourceName, "preferred_availability_zones.#", "1"),
),
},
{
Config: testAccAWSElasticacheClusterConfig_NumCacheNodesWithPreferredAvailabilityZones(rName, 3),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheClusterExists(resourceName, &ec),
resource.TestCheckResourceAttr(resourceName, "num_cache_nodes", "3"),
resource.TestCheckResourceAttr(resourceName, "preferred_availability_zones.#", "3"),
),
},
},
@@ -963,70 +1014,39 @@ resource "aws_elasticache_cluster" "bar" {
}
`

var testAccAWSElasticacheClusterConfigDecreasingNodes = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
func testAccAWSElasticacheClusterConfig_NumCacheNodes(rName string, numCacheNodes int) string {
return fmt.Sprintf(`
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-%s"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 3
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
}
`

var testAccAWSElasticacheClusterConfigDecreasingNodes_update = `
provider "aws" {
region = "us-east-1"
apply_immediately = true
cluster_id = "%s"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = %d
parameter_group_name = "default.memcached1.4"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
`, rName, numCacheNodes)
}

resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
func testAccAWSElasticacheClusterConfig_NumCacheNodesWithPreferredAvailabilityZones(rName string, numCacheNodes int) string {
preferredAvailabilityZones := make([]string, numCacheNodes)
for i := range preferredAvailabilityZones {
preferredAvailabilityZones[i] = `"${data.aws_availability_zones.available.names[0]}"`
}

return fmt.Sprintf(`
data "aws_availability_zones" "available" {}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-%s"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
apply_immediately = true
apply_immediately = true
cluster_id = "%s"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = %d
parameter_group_name = "default.memcached1.4"
preferred_availability_zones = [%s]
}
`, rName, numCacheNodes, strings.Join(preferredAvailabilityZones, ","))
}
`

var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
Expand Down Expand Up @@ -1142,7 +1162,7 @@ resource "aws_elasticache_cluster" "bar" {
security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.memcached1.4"
az_mode = "cross-az"
availability_zones = [
preferred_availability_zones = [
"us-west-2a",
"us-west-2b"
]
6 changes: 4 additions & 2 deletions website/docs/r/elasticache_cluster.html.markdown
@@ -127,9 +127,11 @@ SNS topic to send ElastiCache notifications to. Example:

* `az_mode` - (Optional, Memcached only) Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`

* `availability_zone` - (Optional) The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `availability_zones`
* `availability_zone` - (Optional) The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.

* `availability_zones` - (Optional, Memcached only) List of Availability Zones in which the cache nodes will be created. If you want to create cache nodes in single-az, use `availability_zone`
* `availability_zones` - (*DEPRECATED*, Optional, Memcached only) Use `preferred_availability_zones` instead unless you want to create cache nodes in single-az, then use `availability_zone`. Set of Availability Zones in which the cache nodes will be created.

* `preferred_availability_zones` - (Optional, Memcached only) A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference.

* `tags` - (Optional) A mapping of tags to assign to the resource

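A short sketch of the repeated-zone case mentioned in the `preferred_availability_zones` documentation above (illustrative, not part of the commit): repeating the same Availability Zone keeps every Memcached node in one zone while still satisfying the requirement that the list length equal `num_cache_nodes`.

resource "aws_elasticache_cluster" "example" {
  cluster_id           = "tf-example"
  engine               = "memcached"
  node_type            = "cache.m1.small"
  num_cache_nodes      = 2
  parameter_group_name = "default.memcached1.4"

  # Both nodes are placed in us-west-2a; the list length still equals num_cache_nodes.
  preferred_availability_zones = ["us-west-2a", "us-west-2a"]
}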
