
Allow a node pool to be created before it is destroyed #256

Merged

Changes from 1 commit

Commits (21)
90fc509 - Allow a node pool to be created before it is destroyed (Sep 4, 2019)
0832ba0 - simplify count for random_id.name since random_id is a safe op (Sep 5, 2019)
ecfb001 - Track all resources in keepers which are specified by ForceNew in nod… (Sep 5, 2019)
5ea28b5 - templatize lifecycle block since interpolations are not allowed (Sep 6, 2019)
9b28164 - Merge branch 'master' into create-before-destroy-nodepools (Sep 6, 2019)
3879942 - rename terraform resource names to more appropriate and descriptive … (Sep 6, 2019)
445f1e3 - enable random_id names when create_before_destroy is desired (Sep 7, 2019)
3a9533f - if a metadata_all tag is moved to a specific node pool, it should not… (Sep 9, 2019)
1f95dfb - Create new templated submodule for create_before_destroy lifecycle va… (Sep 9, 2019)
4890bc2 - add breadcrumb back to schemaNodeConfig for keepers list (Sep 9, 2019)
d537448 - s/lifecycle-variant/update-variant/ (Sep 10, 2019)
7208e4e - Merge remote-tracking branch 'upstream/master' into create-before-des… (Sep 10, 2019)
f5ce4e4 - lookup names in a hash table similar to master (Sep 10, 2019)
1854e04 - sort keeper values so that re-ordering has not effect (Sep 11, 2019)
99a179d - Add two examples for the update_variant and test of a private zonal c… (Sep 11, 2019)
2e0f788 - Merge branch 'master' into create-before-destroy-nodepools (asproul, Sep 12, 2019)
86ad23c - generate docs after a master merge (Sep 16, 2019)
82e668a - Merge branch 'master' into create-before-destroy-nodepools (Sep 27, 2019)
07ccb6f - Merge branch 'master' into create-before-destroy-nodepools (Oct 3, 2019)
1109742 - Merge branch 'master' into create-before-destroy-nodepools (Oct 11, 2019)
e80b9c2 - Staying up to date to ensure lint tests pass locally (Oct 11, 2019)
autogen/cluster.tf (19 additions, 1 deletion)
@@ -219,14 +219,31 @@ resource "google_container_cluster" "primary" {
/******************************************
Create Container Cluster node pools
*****************************************/
resource "random_id" "name" {
# if any node_pool definition has a create_before_destroy key, then create random_id names
count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0

byte_length = 2
prefix = format("%s-", lookup(var.node_pools[count.index], "name"))

keepers = {
disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100)
disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
preemptible = lookup(var.node_pools[count.index], "preemptible", false)
local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0)
image_type = lookup(var.node_pools[count.index], "image_type", "COS")
machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
}
}

resource "google_container_node_pool" "pools" {
{% if beta_cluster %}
provider = google-beta
{% else %}
provider = google
{% endif %}
count = length(var.node_pools)
name = var.node_pools[count.index]["name"]
name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name")
project = var.project_id
location = local.location
cluster = google_container_cluster.primary.name
@@ -342,6 +359,7 @@ resource "google_container_node_pool" "pools" {

lifecycle {
ignore_changes = [initial_node_count]
create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null)
}

timeouts {
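For context, here is a minimal sketch of how a caller might opt a pool into the new behavior. The module source, project, and networking values below are illustrative placeholders; the relevant part is the create_before_destroy key inside a node_pools entry, which the random_id count expression and the node pool name in the diff above key on.

module "gke" {
  source            = "terraform-google-modules/kubernetes-engine/google"
  project_id        = "my-project"      # illustrative
  name              = "example-cluster" # illustrative
  region            = "us-central1"
  network           = "default"
  subnetwork        = "default"
  ip_range_pods     = "pods"
  ip_range_services = "services"

  node_pools = [
    {
      name         = "pool-01"
      machine_type = "n1-standard-2"
      min_count    = 1
      max_count    = 3

      # Opting in: the pool gets a random_id-suffixed name, and changes to
      # ForceNew fields (machine_type, disk_size_gb, image_type, ...) roll out
      # a replacement pool before the old one is destroyed.
      create_before_destroy = true
    },
  ]
}

With that key set, a change to any keeper value rotates the random_id, the pool receives a fresh suffixed name such as pool-01-1a2b, and the replacement is created before the old pool is removed. Note that Terraform does not allow expressions in lifecycle meta-arguments, which is why a later commit in this PR templatizes the lifecycle block rather than using the lookup shown in this diff.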
cluster.tf (19 additions, 1 deletion)
@@ -124,10 +124,27 @@ resource "google_container_cluster" "primary" {
/******************************************
Create Container Cluster node pools
*****************************************/
resource "random_id" "name" {
# if any node_pool definition has a create_before_destroy key, then create random_id names
count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0

byte_length = 2
prefix = format("%s-", lookup(var.node_pools[count.index], "name"))

keepers = {
disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100)
disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
preemptible = lookup(var.node_pools[count.index], "preemptible", false)
local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0)
image_type = lookup(var.node_pools[count.index], "image_type", "COS")
machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
}
}

resource "google_container_node_pool" "pools" {
provider = google
count = length(var.node_pools)
name = var.node_pools[count.index]["name"]
name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name")
project = var.project_id
location = local.location
cluster = google_container_cluster.primary.name
@@ -217,6 +234,7 @@ resource "google_container_node_pool" "pools" {

lifecycle {
ignore_changes = [initial_node_count]
create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null)
}

timeouts {
modules/beta-private-cluster/cluster.tf (19 additions, 1 deletion)
@@ -205,10 +205,27 @@ resource "google_container_cluster" "primary" {
/******************************************
Create Container Cluster node pools
*****************************************/
resource "random_id" "name" {
# if any node_pool definition has a create_before_destroy key, then create random_id names
count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0

byte_length = 2
prefix = format("%s-", lookup(var.node_pools[count.index], "name"))

keepers = {
disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100)
disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
preemptible = lookup(var.node_pools[count.index], "preemptible", false)
local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0)
image_type = lookup(var.node_pools[count.index], "image_type", "COS")
machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
}
}

resource "google_container_node_pool" "pools" {
provider = google-beta
count = length(var.node_pools)
name = var.node_pools[count.index]["name"]
name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name")
project = var.project_id
location = local.location
cluster = google_container_cluster.primary.name
@@ -318,6 +335,7 @@ resource "google_container_node_pool" "pools" {

lifecycle {
ignore_changes = [initial_node_count]
create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null)
}

timeouts {
modules/beta-public-cluster/cluster.tf (19 additions, 1 deletion)
@@ -200,10 +200,27 @@ resource "google_container_cluster" "primary" {
/******************************************
Create Container Cluster node pools
*****************************************/
resource "random_id" "name" {
# if any node_pool definition has a create_before_destroy key, then create random_id names
count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0

byte_length = 2
prefix = format("%s-", lookup(var.node_pools[count.index], "name"))

keepers = {
disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100)
disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
preemptible = lookup(var.node_pools[count.index], "preemptible", false)
local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0)
image_type = lookup(var.node_pools[count.index], "image_type", "COS")
machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
}
}

resource "google_container_node_pool" "pools" {
provider = google-beta
count = length(var.node_pools)
name = var.node_pools[count.index]["name"]
name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name")
project = var.project_id
location = local.location
cluster = google_container_cluster.primary.name
@@ -313,6 +330,7 @@ resource "google_container_node_pool" "pools" {

lifecycle {
ignore_changes = [initial_node_count]
create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null)
}

timeouts {
modules/private-cluster/cluster.tf (19 additions, 1 deletion)
@@ -129,10 +129,27 @@ resource "google_container_cluster" "primary" {
/******************************************
Create Container Cluster node pools
*****************************************/
resource "random_id" "name" {
# if any node_pool definition has a create_before_destroy key, then create random_id names
count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0

byte_length = 2
prefix = format("%s-", lookup(var.node_pools[count.index], "name"))

keepers = {
disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100)
disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
preemptible = lookup(var.node_pools[count.index], "preemptible", false)
local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0)
image_type = lookup(var.node_pools[count.index], "image_type", "COS")
machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
}
}

resource "google_container_node_pool" "pools" {
provider = google
count = length(var.node_pools)
name = var.node_pools[count.index]["name"]
name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name")
project = var.project_id
location = local.location
cluster = google_container_cluster.primary.name
@@ -222,6 +239,7 @@ resource "google_container_node_pool" "pools" {

lifecycle {
ignore_changes = [initial_node_count]
create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null)
}

timeouts {
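As a side note on the underlying mechanism, the following is a standalone sketch of the random_id keeper pattern this diff relies on; the resource name, variable, and output here are hypothetical and not part of the module.

variable "machine_type" {
  default = "n1-standard-2"
}

resource "random_id" "pool_suffix" {
  byte_length = 2
  prefix      = "example-pool-"

  keepers = {
    # Changing any keeper value forces this resource to be regenerated,
    # which changes the generated suffix below.
    machine_type = var.machine_type
  }
}

output "pool_name" {
  # The prefix is carried through to the output, yielding
  # something like "example-pool-1a2b".
  value = random_id.pool_suffix.hex
}

Because the node pool name in the diff references random_id.name.*.hex and the pool's lifecycle is set to create_before_destroy, a keeper change renames the pool, so Terraform builds the replacement under the new name before destroying the old one.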