From d8cc46d6c8636f05342003f1289b6e9ec88ddd97 Mon Sep 17 00:00:00 2001 From: stephybun Date: Thu, 30 May 2024 08:11:38 +0200 Subject: [PATCH] `azurerm_kubernetes_cluster`/`azurerm_kubernetes_cluster_node_pool` - add `drain_timeout_in_minutes` and `node_soak_duration_in_minutes` (#26137) * `r/azurerm_kubernetes_*`: add `drain_timeout_in_minutes` to `upgrade_settings` * fix: test fixture * fix: panic because of interface mismatch * fix: add drain_timeout_in_minutes for kubernetes_node_pool * small fixes due to rebase * fix tests * remove println * docs * Added node soak parameter Co-authored-by: Itay Grudev * upgrade test cases Co-authored-by: Itay Grudev * Bug Fix: If typo * Automatic style fixes * Removed deprecated fields in 2023-09-02-preview * update changelog * Added params to: upgradeSettingsForDataSourceSchema * Revert "Removed deprecated fields in 2023-09-02-preview" This reverts commit b87cbb298b805c936c81fdaede38b9f0b2642786. * Removed changelog * DRYed code with an early return * Updated default values for drain timeout and node soak duration * Updated defaults and schema type * utils.Int64 -> pointer.To( * DRYed code with an early return * DRYed code with an early return * Updated documentation as requested * Updated default values * Removed unnecessary tests, but left problematic corner cases * Updated cluster documentation * Trimmed the mangedCluster tests in addition to the node pool tests * Update internal/services/containers/kubernetes_cluster_node_pool_resource.go Co-authored-by: stephybun * Update internal/services/containers/kubernetes_cluster_node_pool_resource.go Co-authored-by: stephybun * Update internal/services/containers/kubernetes_cluster_node_pool_resource.go Co-authored-by: stephybun * Update internal/services/containers/migration/kubernetes_cluster_node_pool.go Co-authored-by: stephybun * Updated tests as requested * Fix: make fmt * add CustomizeDiff for drain_timeout_in_minutes and node_soak_duration_in_minutes * remove customizediff for node_soak_duration_timeout since this accepts being set to 0 * reset CHANGELOG * update documentation on drain_timeout_in_minutes * link rest api specs issue for customizediff * remove default value for drain_timeout_in_minutes for 4.0 and various test fixes --------- Co-authored-by: aristosvo <8375124+aristosvo@users.noreply.github.com> Co-authored-by: Jan Schmidle Co-authored-by: Itay Grudev Co-authored-by: Itay Grudev --- .../kubernetes_cluster_data_source.go | 23 ++++--- .../kubernetes_cluster_node_pool_resource.go | 65 ++++++++++++++++--- ...ernetes_cluster_node_pool_resource_test.go | 41 +++--------- .../kubernetes_cluster_other_resource_test.go | 2 +- .../containers/kubernetes_cluster_resource.go | 4 ++ .../kubernetes_cluster_resource_test.go | 39 +---------- ...ubernetes_cluster_resource_upgrade_test.go | 52 ++++++++++----- .../containers/kubernetes_nodepool.go | 45 +++++++++---- .../docs/d/kubernetes_cluster.html.markdown | 10 ++- ...kubernetes_cluster_node_pool.html.markdown | 4 ++ .../docs/r/kubernetes_cluster.html.markdown | 4 ++ ...kubernetes_cluster_node_pool.html.markdown | 4 ++ 12 files changed, 174 insertions(+), 119 deletions(-) diff --git a/internal/services/containers/kubernetes_cluster_data_source.go b/internal/services/containers/kubernetes_cluster_data_source.go index c850a37e2871..0682b6358a9b 100644 --- a/internal/services/containers/kubernetes_cluster_data_source.go +++ b/internal/services/containers/kubernetes_cluster_data_source.go @@ -1465,20 +1465,25 @@ func 
flattenKubernetesClusterDataSourceMicrosoftDefender(input *managedclusters.
 }
 
 func flattenKubernetesClusterDataSourceUpgradeSettings(input *managedclusters.AgentPoolUpgradeSettings) []interface{} {
-	maxSurge := ""
-	if input != nil && input.MaxSurge != nil {
-		maxSurge = *input.MaxSurge
+	if input == nil {
+		return []interface{}{}
 	}
 
-	if maxSurge == "" {
-		return []interface{}{}
+	values := make(map[string]interface{})
+
+	if input.MaxSurge != nil {
+		values["max_surge"] = *input.MaxSurge
 	}
 
-	return []interface{}{
-		map[string]interface{}{
-			"max_surge": maxSurge,
-		},
+	if input.DrainTimeoutInMinutes != nil {
+		values["drain_timeout_in_minutes"] = *input.DrainTimeoutInMinutes
+	}
+
+	if input.NodeSoakDurationInMinutes != nil {
+		values["node_soak_duration_in_minutes"] = *input.NodeSoakDurationInMinutes
 	}
+
+	return []interface{}{values}
 }
 
 func flattenCustomCaTrustCerts(input *managedclusters.ManagedClusterSecurityProfile) []interface{} {
diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/internal/services/containers/kubernetes_cluster_node_pool_resource.go
index 7e549673c446..d9b85e7dec98 100644
--- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go
+++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go
@@ -4,6 +4,7 @@ package containers
 
 import (
+	"context"
 	"encoding/base64"
 	"fmt"
 	"log"
@@ -64,6 +65,13 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource {
 		}),
 
 		Schema: resourceKubernetesClusterNodePoolSchema(),
+
+		CustomizeDiff: pluginsdk.CustomDiffInSequence(
+			// The behaviour of the API requires this, but this could be removed when https://github.com/Azure/azure-rest-api-specs/issues/27373 has been addressed
+			pluginsdk.ForceNewIfChange("upgrade_settings.0.drain_timeout_in_minutes", func(ctx context.Context, old, new, meta interface{}) bool {
+				return old != 0 && new == 0
+			}),
+		),
 	}
 }
 
@@ -1137,6 +1145,15 @@ func upgradeSettingsSchema() *pluginsdk.Schema {
 					Type:     pluginsdk.TypeString,
 					Required: true,
 				},
+				"drain_timeout_in_minutes": {
+					Type:     pluginsdk.TypeInt,
+					Optional: true,
+				},
+				"node_soak_duration_in_minutes": {
+					Type:         pluginsdk.TypeInt,
+					Optional:     true,
+					ValidateFunc: validation.IntBetween(0, 30),
+				},
 			},
 		},
 	}
@@ -1152,6 +1169,15 @@ func upgradeSettingsSchema() *pluginsdk.Schema {
 					Optional: true,
 					Default:  "10%",
 				},
+				"drain_timeout_in_minutes": {
+					Type:     pluginsdk.TypeInt,
+					Optional: true,
+				},
+				"node_soak_duration_in_minutes": {
+					Type:         pluginsdk.TypeInt,
+					Optional:     true,
+					ValidateFunc: validation.IntBetween(0, 30),
+				},
 			},
 		},
 	}
@@ -1167,6 +1193,14 @@ func upgradeSettingsForDataSourceSchema() *pluginsdk.Schema {
 					Type:     pluginsdk.TypeString,
 					Computed: true,
 				},
+				"drain_timeout_in_minutes": {
+					Type:     pluginsdk.TypeInt,
+					Computed: true,
+				},
+				"node_soak_duration_in_minutes": {
+					Type:     pluginsdk.TypeInt,
+					Computed: true,
+				},
 			},
 		},
 	}
@@ -1216,31 +1250,42 @@ func expandAgentPoolKubeletConfig(input []interface{}) *agentpools.KubeletConfig
 func expandAgentPoolUpgradeSettings(input []interface{}) *agentpools.AgentPoolUpgradeSettings {
 	setting := &agentpools.AgentPoolUpgradeSettings{}
 	if len(input) == 0 || input[0] == nil {
-		return setting
+		return nil
 	}
 
 	v := input[0].(map[string]interface{})
 	if maxSurgeRaw := v["max_surge"].(string); maxSurgeRaw != "" {
 		setting.MaxSurge = utils.String(maxSurgeRaw)
 	}
+	if drainTimeoutInMinutesRaw, ok := v["drain_timeout_in_minutes"].(int); ok {
+		setting.DrainTimeoutInMinutes = pointer.To(int64(drainTimeoutInMinutesRaw))
+	}
+	if 
nodeSoakDurationInMinutesRaw, ok := v["node_soak_duration_in_minutes"].(int); ok { + setting.NodeSoakDurationInMinutes = pointer.To(int64(nodeSoakDurationInMinutesRaw)) + } return setting } func flattenAgentPoolUpgradeSettings(input *agentpools.AgentPoolUpgradeSettings) []interface{} { - maxSurge := "" - if input != nil && input.MaxSurge != nil { - maxSurge = *input.MaxSurge + if input == nil { + return []interface{}{} } - if maxSurge == "" { - return []interface{}{} + values := make(map[string]interface{}) + + if input.MaxSurge != nil && *input.MaxSurge != "" { + values["max_surge"] = *input.MaxSurge } - return []interface{}{ - map[string]interface{}{ - "max_surge": maxSurge, - }, + if input.DrainTimeoutInMinutes != nil { + values["drain_timeout_in_minutes"] = *input.DrainTimeoutInMinutes } + + if input.NodeSoakDurationInMinutes != nil { + values["node_soak_duration_in_minutes"] = *input.NodeSoakDurationInMinutes + } + + return []interface{}{values} } func expandNodeLabels(input map[string]interface{}) *map[string]string { diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index 5eb395eb5a6a..0cdb6b7eb58b 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ -565,29 +565,16 @@ func TestAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) { data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.upgradeSettingsConfig(data, "2"), + Config: r.upgradeSettings(data, 35, 18), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"), - check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"), ), }, data.ImportStep(), { - Config: r.upgradeSettingsConfig(data, "4"), + Config: r.upgradeSettings(data, 1, 0), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"), - check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("4"), - ), - }, - data.ImportStep(), - { - Config: r.upgradeSettingsConfig(data, "10%"), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"), - check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("10%"), ), }, data.ImportStep(), @@ -925,13 +912,6 @@ func TestAccKubernetesClusterNodePool_workloadRuntime(t *testing.T) { ), }, data.ImportStep(), - { - Config: r.workloadRuntime(data, "WasmWasi"), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - ), - }, - data.ImportStep(), { Config: r.workloadRuntime(data, "KataMshvVmIsolation"), Check: acceptance.ComposeTestCheckFunc( @@ -2035,13 +2015,8 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, r.templateConfig(data)) } -func (r KubernetesClusterNodePoolResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string { +func (r KubernetesClusterNodePoolResource) upgradeSettings(data acceptance.TestData, drainTimeout int, nodeSoakDuration int) string { template := r.templateConfig(data) - if maxSurge != "" { - maxSurge = fmt.Sprintf(`upgrade_settings { - max_surge = %q - }`, maxSurge) - } return fmt.Sprintf(` provider "azurerm" { @@ -2055,9 +2030,13 @@ 
resource "azurerm_kubernetes_cluster_node_pool" "test" { kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id vm_size = "Standard_DS2_v2" node_count = 3 - %s + upgrade_settings { + max_surge = "10%%" + drain_timeout_in_minutes = %d + node_soak_duration_in_minutes = %d + } } -`, template, maxSurge) +`, template, drainTimeout, nodeSoakDuration) } func (r KubernetesClusterNodePoolResource) virtualNetworkAutomaticConfig(data acceptance.TestData) string { @@ -2379,7 +2358,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { vm_size = "Standard_DS2_v2" enable_auto_scaling = true min_count = 1 - max_count = 1000 + max_count = 399 node_count = 1 } `, r.templateConfig(data)) diff --git a/internal/services/containers/kubernetes_cluster_other_resource_test.go b/internal/services/containers/kubernetes_cluster_other_resource_test.go index ccb36bf9b856..ed530f70ef35 100644 --- a/internal/services/containers/kubernetes_cluster_other_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_other_resource_test.go @@ -1358,7 +1358,7 @@ resource "azurerm_kubernetes_cluster" "test" { vm_size = "Standard_DS2_v2" enable_auto_scaling = true min_count = 1 - max_count = 1000 + max_count = 399 node_count = 1 upgrade_settings { max_surge = "10%%" diff --git a/internal/services/containers/kubernetes_cluster_resource.go b/internal/services/containers/kubernetes_cluster_resource.go index 6889b4c467bf..d23a22c35728 100644 --- a/internal/services/containers/kubernetes_cluster_resource.go +++ b/internal/services/containers/kubernetes_cluster_resource.go @@ -67,6 +67,10 @@ func resourceKubernetesCluster() *pluginsdk.Resource { ), CustomizeDiff: pluginsdk.CustomDiffInSequence( + // The behaviour of the API requires this, but this could be removed when https://github.com/Azure/azure-rest-api-specs/issues/27373 has been addressed + pluginsdk.ForceNewIfChange("default_node_pool.0.upgrade_settings.0.drain_timeout_in_minutes", func(ctx context.Context, old, new, meta interface{}) bool { + return old != 0 && new == 0 + }), // Migration of `identity` to `service_principal` is not allowed, the other way around is pluginsdk.ForceNewIfChange("service_principal.0.client_id", func(ctx context.Context, old, new, meta interface{}) bool { return old == "msi" || old == "" diff --git a/internal/services/containers/kubernetes_cluster_resource_test.go b/internal/services/containers/kubernetes_cluster_resource_test.go index e19af89707ab..4cc780aaf033 100644 --- a/internal/services/containers/kubernetes_cluster_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_resource_test.go @@ -23,7 +23,7 @@ type KubernetesClusterResource struct{} var ( olderKubernetesVersion = "1.28.5" - currentKubernetesVersion = "1.29.0" + currentKubernetesVersion = "1.29.2" olderKubernetesVersionAlias = "1.28" currentKubernetesVersionAlias = "1.29" ) @@ -710,43 +710,6 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion, enabled) } -func (r KubernetesClusterResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string { - if maxSurge != "" { - maxSurge = fmt.Sprintf(`upgrade_settings { - max_surge = %q - }`, maxSurge) - } - - return fmt.Sprintf(` -provider "azurerm" { - features {} -} - -resource "azurerm_resource_group" "test" { - name = "acctestRG-aks-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = 
azurerm_resource_group.test.location - resource_group_name = azurerm_resource_group.test.name - dns_prefix = "acctestaks%d" - - default_node_pool { - name = "default" - node_count = 1 - vm_size = "Standard_DS2_v2" - %s - } - - identity { - type = "SystemAssigned" - } -} - `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, maxSurge) -} - func TestAccResourceKubernetesCluster_roleBasedAccessControlAAD_OlderKubernetesVersion(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} diff --git a/internal/services/containers/kubernetes_cluster_resource_upgrade_test.go b/internal/services/containers/kubernetes_cluster_resource_upgrade_test.go index 320865744770..413df6ad9f7e 100644 --- a/internal/services/containers/kubernetes_cluster_resource_upgrade_test.go +++ b/internal/services/containers/kubernetes_cluster_resource_upgrade_test.go @@ -279,35 +279,57 @@ func TestAccKubernetesCluster_upgradeSettings(t *testing.T) { data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.upgradeSettingsConfig(data, "2"), + Config: r.upgradeSettings(data, 35, 18), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"), - check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("2"), ), }, data.ImportStep(), { - Config: r.upgradeSettingsConfig(data, "10%"), + Config: r.upgradeSettings(data, 1, 0), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"), - check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("10%"), - ), - }, - data.ImportStep(), - { - Config: r.upgradeSettingsConfig(data, "2"), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"), - check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("2"), ), }, data.ImportStep(), }) } +func (r KubernetesClusterResource) upgradeSettings(data acceptance.TestData, drainTimeout int, nodeSoakDuration int) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + upgrade_settings { + max_surge = "10%%" + drain_timeout_in_minutes = %d + node_soak_duration_in_minutes = %d + } + } + + identity { + type = "SystemAssigned" + } +} + `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, drainTimeout, nodeSoakDuration) +} + func (KubernetesClusterResource) upgradeControlPlaneConfig(data acceptance.TestData, controlPlaneVersion string) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index f471ef34f4ed..27f953cf25a0 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ 
b/internal/services/containers/kubernetes_nodepool.go @@ -1179,8 +1179,16 @@ func ConvertDefaultNodePoolToAgentPool(input *[]managedclusters.ManagedClusterAg agentpool.Properties.ScaleDownMode = pointer.To(agentpools.ScaleDownMode(string(*scaleDownModeNodePool))) } agentpool.Properties.UpgradeSettings = &agentpools.AgentPoolUpgradeSettings{} - if upgradeSettingsNodePool := defaultCluster.UpgradeSettings; upgradeSettingsNodePool != nil && upgradeSettingsNodePool.MaxSurge != nil && *upgradeSettingsNodePool.MaxSurge != "" { - agentpool.Properties.UpgradeSettings.MaxSurge = upgradeSettingsNodePool.MaxSurge + if upgradeSettingsNodePool := defaultCluster.UpgradeSettings; upgradeSettingsNodePool != nil { + if upgradeSettingsNodePool.MaxSurge != nil && *upgradeSettingsNodePool.MaxSurge != "" { + agentpool.Properties.UpgradeSettings.MaxSurge = upgradeSettingsNodePool.MaxSurge + } + if upgradeSettingsNodePool.DrainTimeoutInMinutes != nil { + agentpool.Properties.UpgradeSettings.DrainTimeoutInMinutes = upgradeSettingsNodePool.DrainTimeoutInMinutes + } + if upgradeSettingsNodePool.NodeSoakDurationInMinutes != nil { + agentpool.Properties.UpgradeSettings.NodeSoakDurationInMinutes = upgradeSettingsNodePool.NodeSoakDurationInMinutes + } } if workloadRuntimeNodePool := defaultCluster.WorkloadRuntime; workloadRuntimeNodePool != nil { agentpool.Properties.WorkloadRuntime = pointer.To(agentpools.WorkloadRuntime(string(*workloadRuntimeNodePool))) @@ -1829,20 +1837,25 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf } func flattenClusterNodePoolUpgradeSettings(input *managedclusters.AgentPoolUpgradeSettings) []interface{} { - maxSurge := "" - if input != nil && input.MaxSurge != nil { - maxSurge = *input.MaxSurge + if input == nil { + return []interface{}{} } - if maxSurge == "" { - return []interface{}{} + values := make(map[string]interface{}) + + if input.MaxSurge != nil && *input.MaxSurge != "" { + values["max_surge"] = *input.MaxSurge } - return []interface{}{ - map[string]interface{}{ - "max_surge": maxSurge, - }, + if input.DrainTimeoutInMinutes != nil { + values["drain_timeout_in_minutes"] = *input.DrainTimeoutInMinutes + } + + if input.NodeSoakDurationInMinutes != nil { + values["node_soak_duration_in_minutes"] = *input.NodeSoakDurationInMinutes } + + return []interface{}{values} } func flattenClusterNodePoolKubeletConfig(input *managedclusters.KubeletConfig) []interface{} { @@ -2199,13 +2212,21 @@ func findDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProfile func expandClusterNodePoolUpgradeSettings(input []interface{}) *managedclusters.AgentPoolUpgradeSettings { setting := &managedclusters.AgentPoolUpgradeSettings{} if len(input) == 0 || input[0] == nil { - return setting + return nil } v := input[0].(map[string]interface{}) if maxSurgeRaw := v["max_surge"].(string); maxSurgeRaw != "" { setting.MaxSurge = utils.String(maxSurgeRaw) } + + if drainTimeoutInMinutesRaw, ok := v["drain_timeout_in_minutes"].(int); ok { + setting.DrainTimeoutInMinutes = pointer.To(int64(drainTimeoutInMinutesRaw)) + } + if nodeSoakDurationInMinutesRaw, ok := v["node_soak_duration_in_minutes"].(int); ok { + setting.NodeSoakDurationInMinutes = pointer.To(int64(nodeSoakDurationInMinutesRaw)) + } + return setting } diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 29cdea762be6..353ca24375e4 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ 
-182,6 +182,10 @@ An `azure_active_directory_role_based_access_control` block exports the followin A `upgrade_settings` block exports the following: +* `drain_timeout_in_minutes` - The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. + +* `node_soak_duration_in_minutes` - The amount of time in minutes to wait after draining a node and before reimaging it and moving on to next node. + * `max_surge` - The maximum number or percentage of nodes that will be added to the Node Pool size during an upgrade. --- @@ -262,7 +266,7 @@ A `network_profile` block exports the following: * `network_plugin` - Network plugin used such as `azure` or `kubenet`. * `network_policy` - Network policy to be used with Azure CNI. e.g. `calico` or `azure` - + * `network_mode` - Network mode to be used with Azure CNI. e.g. `bridge` or `transparent` * `pod_cidr` - The CIDR used for pod IP addresses. @@ -277,7 +281,7 @@ An `oms_agent` block exports the following: * `msi_auth_for_monitoring_enabled` - Is managed identity authentication for monitoring enabled? -* `oms_agent_identity` - An `oms_agent_identity` block as defined below. +* `oms_agent_identity` - An `oms_agent_identity` block as defined below. --- @@ -301,7 +305,7 @@ An `ingress_application_gateway` block supports the following: * `subnet_id` - The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. This attribute is only set when `subnet_id` is specified when configuring the `ingress_application_gateway` addon. -* `ingress_application_gateway_identity` - An `ingress_application_gateway_identity` block as defined below. +* `ingress_application_gateway_identity` - An `ingress_application_gateway_identity` block as defined below. --- diff --git a/website/docs/d/kubernetes_cluster_node_pool.html.markdown b/website/docs/d/kubernetes_cluster_node_pool.html.markdown index cc7faa3379db..4efb5e857408 100644 --- a/website/docs/d/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/d/kubernetes_cluster_node_pool.html.markdown @@ -94,6 +94,10 @@ In addition to the Arguments listed above - the following Attributes are exporte A `upgrade_settings` block exports the following: +* `drain_timeout_in_minutes` - The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. + +* `node_soak_duration_in_minutes` - The amount of time in minutes to wait after draining a node and before reimaging it and moving on to next node. + * `max_surge` - The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. ## Timeouts diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index ac00e680354c..09df74c04030 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -945,6 +945,10 @@ A `http_proxy_config` block supports the following: A `upgrade_settings` block supports the following: +* `drain_timeout_in_minutes` - (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors pod disruption budgets for upgrades. If this time is exceeded, the upgrade fails. 
Unsetting this after configuring it will force a new resource to be created.
+
+* `node_soak_duration_in_minutes` - (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to the next node. Defaults to `0`.
+
 * `max_surge` - (Required) The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.
 
 -> **Note:** If a percentage is provided, the number of surge nodes is calculated from the `node_count` value on the current cluster. Node surge can allow a cluster to have more nodes than `max_count` during an upgrade. Ensure that your cluster has enough [IP space](https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade) during an upgrade.
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
index fdfd41a556df..e821b4623678 100644
--- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown
+++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -308,6 +308,10 @@ A `sysctl_config` block supports the following:
 
 A `upgrade_settings` block supports the following:
 
+* `drain_timeout_in_minutes` - (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created.
+
+* `node_soak_duration_in_minutes` - (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to the next node. Defaults to `0`.
+
 * `max_surge` - (Required) The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.
 
 ---
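
Usage sketch (not part of the patch): a minimal, hypothetical configuration showing how the two new attributes combine with `max_surge`, mirroring the `upgradeSettings(data, 35, 18)` acceptance test fixture above; resource names and sizes are placeholders.

```hcl
resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 3

  upgrade_settings {
    max_surge = "10%"

    # Optional. The upgrade fails for any node that cannot be drained within this window.
    # Removing the value after it has been set forces a new resource (see the CustomizeDiff above).
    drain_timeout_in_minutes = 35

    # Optional, 0-30 minutes (validation.IntBetween(0, 30)): time to wait after draining
    # a node before reimaging and moving on to the next one.
    node_soak_duration_in_minutes = 18
  }
}
```

Per the inline comment, the `ForceNewIfChange` guard works around API behaviour tracked in azure-rest-api-specs issue 27373: changing `drain_timeout_in_minutes` from a non-zero value back to `0` forces the resource to be recreated.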