Add support for resizing a node pool defined in google_container_cluster (hashicorp#331)

* Add support for resizing a node pool defined in google_container_cluster

* Add initial_node_count back but mark it as deprecated
danawillow authored Aug 18, 2017
1 parent d03d247 commit 741f614
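
As context for the change, here is a minimal sketch of how the new node_count attribute is meant to be used (the resource name, cluster name, and zone below are illustrative, not taken from this commit): changing node_count on an existing node_pool block resizes the pool in place through the node pool SetSize call added in this commit, while initial_node_count is still accepted but deprecated.

resource "google_container_cluster" "example" {
  name = "example-cluster"
  zone = "us-central1-a"

  node_pool {
    name       = "example-pool"
    node_count = 2   # change to 3 and re-apply to resize the pool in place
  }
}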
Showing 2 changed files with 151 additions and 24 deletions.
84 changes: 74 additions & 10 deletions google/resource_container_cluster.go
@@ -9,6 +9,7 @@ import (

"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"google.golang.org/api/container/v1"
)

@@ -237,9 +238,18 @@ func resourceContainerCluster() *schema.Resource {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"initial_node_count": {
-  Type:     schema.TypeInt,
-  Required: true,
-  ForceNew: true,
+  Type:       schema.TypeInt,
+  Optional:   true,
+  ForceNew:   true,
+  Computed:   true,
+  Deprecated: "Use node_count instead",
},

+ "node_count": {
+   Type:         schema.TypeInt,
+   Optional:     true,
+   Computed:     true,
+   ValidateFunc: validation.IntAtLeast(1),
+ },

"name": {
@@ -374,7 +384,20 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
nodePools := make([]*container.NodePool, 0, nodePoolsCount)
for i := 0; i < nodePoolsCount; i++ {
prefix := fmt.Sprintf("node_pool.%d", i)
- nodeCount := d.Get(prefix + ".initial_node_count").(int)

+ nodeCount := 0
+ if initialNodeCount, ok := d.GetOk(prefix + ".initial_node_count"); ok {
+   nodeCount = initialNodeCount.(int)
+ }
+ if nc, ok := d.GetOk(prefix + ".node_count"); ok {
+   if nodeCount != 0 {
+     return fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %d", i)
+   }
+   nodeCount = nc.(int)
+ }
+ if nodeCount == 0 {
+   return fmt.Errorf("Node pool %d cannot be set with 0 node count", i)
+ }

name, err := generateNodePoolName(prefix, d)
if err != nil {
@@ -472,7 +495,11 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
d.Set("network", d.Get("network").(string))
d.Set("subnetwork", cluster.Subnetwork)
d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools))
nps, err := flattenClusterNodePools(d, config, cluster.NodePools)
if err != nil {
return err
}
d.Set("node_pool", nps)

if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil {
return err
@@ -597,6 +624,31 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
d.SetPartial("enable_legacy_abac")
}

if n, ok := d.GetOk("node_pool.#"); ok {
for i := 0; i < n.(int); i++ {
if d.HasChange(fmt.Sprintf("node_pool.%d.node_count", i)) {
newSize := int64(d.Get(fmt.Sprintf("node_pool.%d.node_count", i)).(int))
req := &container.SetNodePoolSizeRequest{
NodeCount: newSize,
}
npName := d.Get(fmt.Sprintf("node_pool.%d.name", i)).(string)
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zoneName, clusterName, npName, req).Do()
if err != nil {
return err
}

// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE node pool size", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}

log.Printf("[INFO] GKE node pool %s size has been updated to %d", npName, newSize)
}
}
d.SetPartial("node_pool")
}

d.Partial(false)

return resourceContainerClusterRead(d, meta)
@@ -679,22 +731,34 @@ func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{}
return config
}

- func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} {
-   count := len(c)
-
-   nodePools := make([]map[string]interface{}, 0, count)
+ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*container.NodePool) ([]map[string]interface{}, error) {
+   nodePools := make([]map[string]interface{}, 0, len(c))

for i, np := range c {
// Node pools don't expose the current node count in their API, so read the
// instance groups instead. They should all have the same size, but in case a resize
// failed or something else strange happened, we'll just use the average size.
size := 0
for _, url := range np.InstanceGroupUrls {
// retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers)
matches := instanceGroupManagerURL.FindStringSubmatch(url)
igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
if err != nil {
return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
}
size += int(igm.TargetSize)
}
nodePool := map[string]interface{}{
"name": np.Name,
"name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
"initial_node_count": np.InitialNodeCount,
"node_count": size / len(np.InstanceGroupUrls),
"node_config": flattenClusterNodeConfig(np.Config),
}
nodePools = append(nodePools, nodePool)
}

- return nodePools
+ return nodePools, nil
}

func generateNodePoolName(prefix string, d *schema.ResourceData) (string, error) {
91 changes: 77 additions & 14 deletions google/resource_container_cluster_test.go
@@ -9,10 +9,11 @@ import (

"strconv"

"regexp"

"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"regexp"
)

func TestAccContainerCluster_basic(t *testing.T) {
@@ -252,6 +253,34 @@ func TestAccContainerCluster_withNodePoolBasic(t *testing.T) {
})
}

func TestAccContainerCluster_withNodePoolResize(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
npName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withNodePoolAdditionalZones(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_node_pool"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.node_count", "2"),
),
},
{
Config: testAccContainerCluster_withNodePoolResize(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_node_pool"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.node_count", "3"),
),
},
},
})
}

func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -416,9 +445,7 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc {

for i, np := range cluster.NodePools {
prefix := fmt.Sprintf("node_pool.%d.", i)
- clusterTests = append(clusterTests,
-   clusterTestField{prefix + "name", np.Name},
-   clusterTestField{prefix + "initial_node_count", strconv.FormatInt(np.InitialNodeCount, 10)})
+ clusterTests = append(clusterTests, clusterTestField{prefix + "name", np.Name})
if np.Config != nil {
clusterTests = append(clusterTests,
clusterTestField{prefix + "node_config.0.machine_type", np.Config.MachineType},
@@ -827,6 +854,42 @@ resource "google_container_cluster" "with_node_pool" {
}
}`, acctest.RandString(10), acctest.RandString(10))

func testAccContainerCluster_withNodePoolAdditionalZones(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
zone = "us-central1-a"
additional_zones = [
"us-central1-b",
"us-central1-c"
]
node_pool {
name = "%s"
node_count = 2
}
}`, cluster, nodePool)
}

func testAccContainerCluster_withNodePoolResize(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
zone = "us-central1-a"
additional_zones = [
"us-central1-b",
"us-central1-c"
]
node_pool {
name = "%s"
node_count = 3
}
}`, cluster, nodePool)
}

var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool_name_prefix" {
name = "tf-cluster-nodepool-test-%s"
@@ -838,8 +901,8 @@ resource "google_container_cluster" "with_node_pool_name_prefix" {
}
node_pool {
name_prefix = "tf-np-test"
initial_node_count = 2
name_prefix = "tf-np-test"
node_count = 2
}
}`, acctest.RandString(10))

@@ -854,13 +917,13 @@ resource "google_container_cluster" "with_node_pool_multiple" {
}
node_pool {
name = "tf-cluster-nodepool-test-%s"
initial_node_count = 2
name = "tf-cluster-nodepool-test-%s"
node_count = 2
}
node_pool {
name = "tf-cluster-nodepool-test-%s"
initial_node_count = 3
name = "tf-cluster-nodepool-test-%s"
node_count = 3
}
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))

@@ -876,9 +939,9 @@ resource "google_container_cluster" "with_node_pool_multiple" {
node_pool {
# ERROR: name and name_prefix cannot be both specified
name = "tf-cluster-nodepool-test-%s"
name_prefix = "tf-cluster-nodepool-test-"
initial_node_count = 1
name = "tf-cluster-nodepool-test-%s"
name_prefix = "tf-cluster-nodepool-test-"
node_count = 1
}
}`, acctest.RandString(10), acctest.RandString(10))

@@ -890,7 +953,7 @@ resource "google_container_cluster" "with_node_pool_node_config" {
zone = "us-central1-a"
node_pool {
name = "tf-cluster-nodepool-test-%s"
- initial_node_count = 2
+ node_count = 2
node_config {
machine_type = "n1-standard-1"
disk_size_gb = 15