Add support for google_container_node_pool management (#669)
* add support for `google_container_node_pool` management (sans-tests)

* [review] add tests, docs, general cleanup

* add docs

* [review] amend test to check updates and terraform fmt

* test updates, make nested management fields non-computed
davidquarles authored and danawillow committed Nov 21, 2017
1 parent 9aeb58e commit 34297fe
Showing 3 changed files with 154 additions and 1 deletion.
73 changes: 73 additions & 0 deletions google/resource_container_node_pool.go
@@ -85,6 +85,28 @@ var schemaNodePool = map[string]*schema.Schema{
Deprecated: "Use node_count instead",
},

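// The "management" block itself is Optional+Computed so an omitted block picks up
// the server-side values in state; the nested fields are non-computed with explicit defaults.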
"management": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"auto_repair": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},

"auto_upgrade": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},

"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -301,6 +323,19 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool,
}
}

if v, ok := d.GetOk(prefix + "management"); ok {
managementConfig := v.([]interface{})[0].(map[string]interface{})
np.Management = &container.NodeManagement{}

if v, ok := managementConfig["auto_repair"]; ok {
np.Management.AutoRepair = v.(bool)
}

if v, ok := managementConfig["auto_upgrade"]; ok {
np.Management.AutoUpgrade = v.(bool)
}
}

return np, nil
}

@@ -335,6 +370,13 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP
}
}

nodePool["management"] = []map[string]interface{}{
{
"auto_repair": np.Management.AutoRepair,
"auto_upgrade": np.Management.AutoUpgrade,
},
}

return nodePool, nil
}

@@ -412,6 +454,37 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefi
}
}

if d.HasChange(prefix + "management") {
management := &container.NodeManagement{}
if v, ok := d.GetOk(prefix + "management"); ok {
managementConfig := v.([]interface{})[0].(map[string]interface{})
management.AutoRepair = managementConfig["auto_repair"].(bool)
management.AutoUpgrade = managementConfig["auto_upgrade"].(bool)
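// ForceSendFields ensures the generated API client serializes these fields even when
// both are false, which would otherwise be omitted as zero values.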
management.ForceSendFields = []string{"AutoRepair", "AutoUpgrade"}
}
req := &container.SetNodePoolManagementRequest{
Management: management,
}
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetManagement(
project, zone, clusterName, npName, req).Do()

if err != nil {
return err
}

// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool management", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}

log.Printf("[INFO] Updated management in Node Pool %s", npName)

if prefix == "" {
d.SetPartial("management")
}
}

return nil
}

71 changes: 71 additions & 0 deletions google/resource_container_node_pool_test.go
@@ -92,6 +92,51 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
})
}

func TestAccContainerNodePool_withManagement(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
nodePool := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
management := `
management {
auto_repair = "true"
auto_upgrade = "true"
}`

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccContainerNodePool_withManagement(cluster, nodePool, ""),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_management"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.#", "1"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "false"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "false"),
),
},
resource.TestStep{
Config: testAccContainerNodePool_withManagement(cluster, nodePool, management),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches(
"google_container_node_pool.np_with_management"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.#", "1"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "true"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "true"),
),
},
},
})
}

func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
t.Parallel()

@@ -232,6 +277,8 @@ func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {

nodepoolTests := []nodepoolTestField{
{"initial_node_count", strconv.FormatInt(nodepool.InitialNodeCount, 10)},
{"management.0.auto_repair", nodepool.Management.AutoRepair},
{"management.0.auto_upgrade", nodepool.Management.AutoUpgrade},
{"node_config.0.machine_type", nodepool.Config.MachineType},
{"node_config.0.disk_size_gb", strconv.FormatInt(nodepool.Config.DiskSizeGb, 10)},
{"node_config.0.local_ssd_count", strconv.FormatInt(nodepool.Config.LocalSsdCount, 10)},
@@ -416,6 +463,30 @@
}`, cluster, nodePool)
}

func testAccContainerNodePool_withManagement(cluster, nodePool, management string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
}
resource "google_container_node_pool" "np_with_management" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
%s
node_config {
machine_type = "g1-small"
disk_size_gb = 10
oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"]
}
}`, cluster, nodePool, management)
}

func nodepoolCheckMatch(attributes map[string]string, attr string, gcp interface{}) string {
if gcpList, ok := gcp.([]string); ok {
if _, ok := nodepoolSetFields[attr]; ok {
11 changes: 10 additions & 1 deletion website/docs/r/container_node_pool.html.markdown
@@ -63,14 +63,17 @@ resource "google_container_cluster" "primary" {
* `initial_node_count` - (Deprecated, Optional) The initial node count for the pool.
Use `node_count` instead.

* `management` - (Optional) Node management configuration, wherein auto-repair and
auto-upgrade are configured. Structure is documented below.

* `name` - (Optional) The name of the node pool. If left blank, Terraform will
auto-generate a unique name.

* `name_prefix` - (Optional) Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with `name`.

* `node_config` - (Optional) The node configuration of the pool. See
[google_container_cluster](container_cluster.html for schema.
[google_container_cluster](container_cluster.html) for schema.

* `node_count` - (Optional) The number of nodes per instance group.

@@ -84,6 +87,12 @@ The `autoscaling` block supports:

* `max_node_count` - (Required) Maximum number of nodes in the NodePool. Must be >= min_node_count.

The `management` block supports:

* `auto_repair` - (Optional) Whether the nodes will be automatically repaired. Defaults to `false`.

* `auto_upgrade` - (Optional) Whether the nodes will be automatically upgraded. Defaults to `false`.
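
For illustration, a minimal sketch of a pool with both options enabled (the pool and
cluster names here are hypothetical, adapted from the acceptance test config in this commit):

```hcl
resource "google_container_node_pool" "example" {
  name               = "example-pool"
  zone               = "us-central1-a"
  cluster            = "example-cluster"
  initial_node_count = 1

  management {
    auto_repair  = true
    auto_upgrade = true
  }
}
```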

## Import

Node pools can be imported using the `zone`, `cluster` and `name`, e.g.
