Add support for resizing a node pool defined in google_container_cluster #331
@@ -29,7 +29,7 @@ func resourceContainerCluster() *schema.Resource {
            Delete: schema.DefaultTimeout(10 * time.Minute),
        },

-       SchemaVersion: 1,
+       SchemaVersion: 2,
        MigrateState: resourceContainerClusterMigrateState,

        Schema: map[string]*schema.Schema{
@@ -236,10 +236,9 @@ func resourceContainerCluster() *schema.Resource {
                ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
-                       "initial_node_count": {
+                       "node_count": {
                            Type:     schema.TypeInt,
                            Required: true,
-                           ForceNew: true,
                        },

                        "name": {
@@ -374,7 +373,8 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
    nodePools := make([]*container.NodePool, 0, nodePoolsCount)
    for i := 0; i < nodePoolsCount; i++ {
        prefix := fmt.Sprintf("node_pool.%d", i)
-       nodeCount := d.Get(prefix + ".initial_node_count").(int)
+
+       nodeCount := d.Get(prefix + ".node_count").(int)

        name, err := generateNodePoolName(prefix, d)
        if err != nil {
@@ -472,7 +472,11 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
    d.Set("network", d.Get("network").(string))
    d.Set("subnetwork", cluster.Subnetwork)
    d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
-   d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools))
+   nps, err := flattenClusterNodePools(d, config, cluster.NodePools)
+   if err != nil {
+       return err
+   }
+   d.Set("node_pool", nps)

    if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil {
        return err
@@ -597,6 +601,32 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
        d.SetPartial("enable_legacy_abac")
    }

+   if n, ok := d.GetOk("node_pool.#"); ok {
+       for i := 0; i < n.(int); i++ {
+           if d.HasChange(fmt.Sprintf("node_pool.%d.node_count", i)) {
+               newSize := int64(d.Get(fmt.Sprintf("node_pool.%d.node_count", i)).(int))
+               req := &container.SetNodePoolSizeRequest{
+                   NodeCount: newSize,
+               }
+               npName := d.Get(fmt.Sprintf("node_pool.%d.name", i)).(string)
+               op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zoneName, clusterName, npName, req).Do()
+               if err != nil {
+                   return err
+               }
+
+               // Wait until it's updated
+               waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE node pool size", timeoutInMinutes, 2)
+               if waitErr != nil {
+                   return waitErr
+               }
+
+               log.Printf("[INFO] GKE node pool %s size has been updated to %d", npName, newSize)
+
Review comment: nit: remove extra line.
Reply: Done.
+           }
+       }
+       d.SetPartial("node_pool")
+   }
+
    d.Partial(false)

    return resourceContainerClusterRead(d, meta)
@@ -679,22 +709,33 @@ func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{}
    return config
}

-func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} {
-   count := len(c)
-
-   nodePools := make([]map[string]interface{}, 0, count)
+func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*container.NodePool) ([]map[string]interface{}, error) {
+   nodePools := make([]map[string]interface{}, 0, len(c))

    for i, np := range c {
+       // Node pools don't expose the current node count in their API, so read the
+       // instance groups instead. They should all have the same size, but in case a resize
+       // failed or something else strange happened, we'll just use the average size.

Review comment: Have we talked with the GKE folks about updating their API to include the node count?
Reply: Yup (I can show you the thread off-github)
+       size := 0
+       for _, url := range np.InstanceGroupUrls {
+           // retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers)
+           matches := instanceGroupManagerURL.FindStringSubmatch(url)
+           igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
+           if err != nil {
+               return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
+           }
+           size += int(igm.TargetSize)
+       }
        nodePool := map[string]interface{}{
-           "name": np.Name,
-           "name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
-           "initial_node_count": np.InitialNodeCount,
-           "node_config": flattenClusterNodeConfig(np.Config),
+           "name":        np.Name,
+           "name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
+           "node_config": flattenClusterNodeConfig(np.Config),
+           "node_count":  size / len(np.InstanceGroupUrls),
        }
        nodePools = append(nodePools, nodePool)
    }

-   return nodePools
+   return nodePools, nil
}

func generateNodePoolName(prefix string, d *schema.ResourceData) (string, error) {
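The instanceGroupManagerURL regexp used above is defined elsewhere in the provider and isn't part of this diff. Purely as an illustration of the shape it needs, so that matches[1], matches[2], and matches[3] line up with project, zone, and instance group manager name, a sketch might look like this (the pattern and helper names are assumptions, not the provider's actual code):

```go
package google

import "regexp"

// Illustrative only: with a pattern of this shape, matches[1], matches[2] and
// matches[3] are the project, zone and instance group manager name, which is
// how flattenClusterNodePools above consumes them.
var igmURLSketch = regexp.MustCompile(`projects/([^/]+)/zones/([^/]+)/instanceGroupManagers/([^/]+)`)

func parseIGMURL(url string) (project, zone, name string, ok bool) {
    m := igmURLSketch.FindStringSubmatch(url)
    if m == nil {
        return "", "", "", false
    }
    return m[1], m[2], m[3], true
}
```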
Review comment: Should we keep the old field and mark it as deprecated or removed? That way, we can show an error message to the user.
Reply: Done.
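For reference, keeping the renamed attribute around as a tombstone could look roughly like the sketch below, using the helper/schema Removed field so anyone still setting initial_node_count gets an explicit error rather than a silent rename. The function name, the message text, and the choice of Removed over Deprecated are assumptions here, not necessarily what the PR ended up doing.

```go
package google

import "github.com/hashicorp/terraform/helper/schema"

// removedInitialNodeCountSchema is a hypothetical sketch of a tombstone entry
// for the old node_pool attribute; the error text is an assumption.
func removedInitialNodeCountSchema() *schema.Schema {
    return &schema.Schema{
        Type:     schema.TypeInt,
        Optional: true,
        Removed:  "Use node_count instead; initial_node_count is no longer supported on node_pool.",
    }
}
```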