azurerm_kubernetes_cluster/azurerm_kubernetes_cluster_node_pool - add `drain_timeout_in_minutes` and `node_soak_duration_in_minutes` (#26137)

* `r/azurerm_kubernetes_*`: add `drain_timeout_in_minutes` to `upgrade_settings`

* fix: test fixture

* fix: panic because of interface mismatch

* fix: add drain_timeout_in_minutes for kubernetes_node_pool

* small fixes due to rebase

* fix tests

* remove println

* docs

* Added node soak parameter

Co-authored-by: Itay Grudev <[email protected]>

* upgrade test cases

Co-authored-by: Itay Grudev <[email protected]>

* Bug fix: `if` typo

* Automatic style fixes

* Removed deprecated fields in 2023-09-02-preview

* update changelog

* Added params to: upgradeSettingsForDataSourceSchema

* Revert "Removed deprecated fields in 2023-09-02-preview"

This reverts commit b87cbb2.

* Removed changelog

* DRYed code with an early return

* Updated default values for drain timeout and node soak duration

* Updated defaults and schema type

* utils.Int64 -> pointer.To()

* DRYed code with an early return

* DRYed code with an early return

* Updated documentation as requested

* Updated default values

* Removed unnecessary tests, but left problematic corner cases

* Updated cluster documentation

* Trimmed the managedCluster tests in addition to the node pool tests

* Update internal/services/containers/kubernetes_cluster_node_pool_resource.go

Co-authored-by: stephybun <[email protected]>

* Update internal/services/containers/kubernetes_cluster_node_pool_resource.go

Co-authored-by: stephybun <[email protected]>

* Update internal/services/containers/kubernetes_cluster_node_pool_resource.go

Co-authored-by: stephybun <[email protected]>

* Update internal/services/containers/migration/kubernetes_cluster_node_pool.go

Co-authored-by: stephybun <[email protected]>

* Updated tests as requested

* Fix: make fmt

* add CustomizeDiff for drain_timeout_in_minutes and node_soak_duration_in_minutes

* remove customizediff for node_soak_duration_in_minutes since this accepts being set to 0

* reset CHANGELOG

* update documentation on drain_timeout_in_minutes

* link rest api specs issue for customizediff

* remove default value for drain_timeout_in_minutes for 4.0 and various test fixes

---------

Co-authored-by: aristosvo <[email protected]>
Co-authored-by: Jan Schmidle <[email protected]>
Co-authored-by: Itay Grudev <[email protected]>
Co-authored-by: Itay Grudev <[email protected]>
5 people authored May 30, 2024
1 parent 22f6b6e commit d8cc46d
Showing 12 changed files with 174 additions and 119 deletions.
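Before the diffs, a minimal sketch of the two new arguments in use, adapted from the test fixtures in this commit (the resource names and surrounding cluster are illustrative, not part of the change):

resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 3

  upgrade_settings {
    # Pre-existing argument: how many extra nodes are surged in during an upgrade.
    max_surge = "10%"

    # New: how long the upgrade waits for a node to drain before moving on.
    drain_timeout_in_minutes = 35

    # New: how long each upgraded node is left to soak before the next node
    # is upgraded; the schema validates this to the 0-30 range.
    node_soak_duration_in_minutes = 18
  }
}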
23 changes: 14 additions & 9 deletions internal/services/containers/kubernetes_cluster_data_source.go
@@ -1465,20 +1465,25 @@ func flattenKubernetesClusterDataSourceMicrosoftDefender(input *managedclusters.
 }
 
 func flattenKubernetesClusterDataSourceUpgradeSettings(input *managedclusters.AgentPoolUpgradeSettings) []interface{} {
-    maxSurge := ""
-    if input != nil && input.MaxSurge != nil {
-        maxSurge = *input.MaxSurge
+    if input == nil {
+        return []interface{}{}
     }
 
-    if maxSurge == "" {
-        return []interface{}{}
+    values := make(map[string]interface{})
+
+    if input.MaxSurge != nil {
+        values["max_surge"] = *input.MaxSurge
     }
 
-    return []interface{}{
-        map[string]interface{}{
-            "max_surge": maxSurge,
-        },
+    if input.DrainTimeoutInMinutes != nil {
+        values["drain_timeout_in_minutes"] = *input.DrainTimeoutInMinutes
     }
+
+    if input.NodeSoakDurationInMinutes != nil {
+        values["node_soak_duration_in_minutes"] = *input.NodeSoakDurationInMinutes
+    }
+
+    return []interface{}{values}
 }
 
 func flattenCustomCaTrustCerts(input *managedclusters.ManagedClusterSecurityProfile) []interface{} {
internal/services/containers/kubernetes_cluster_node_pool_resource.go
@@ -4,6 +4,7 @@
 package containers
 
 import (
+    "context"
     "encoding/base64"
     "fmt"
     "log"
@@ -64,6 +65,13 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource {
         }),
 
         Schema: resourceKubernetesClusterNodePoolSchema(),
+
+        CustomizeDiff: pluginsdk.CustomDiffInSequence(
+            // The behaviour of the API requires this, but this could be removed when https://github.com/Azure/azure-rest-api-specs/issues/27373 has been addressed
+            pluginsdk.ForceNewIfChange("upgrade_settings.0.drain_timeout_in_minutes", func(ctx context.Context, old, new, meta interface{}) bool {
+                return old != 0 && new == 0
+            }),
+        ),
     }
 }

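In plain terms: the API currently offers no way to unset a drain timeout once one has been configured (see the linked rest-api-specs issue), so changing `drain_timeout_in_minutes` from a non-zero value back to 0 is treated as ForceNew and plans a replacement of the node pool. A sketch of the plan-level effect, with illustrative values:

# Applied first: a drain timeout is set.
upgrade_settings {
  max_surge                = "10%"
  drain_timeout_in_minutes = 30
}

# Applied second: the argument is removed and falls back to 0.
# With the CustomizeDiff above, this now plans a node pool replacement
# instead of an in-place update the API cannot honour.
upgrade_settings {
  max_surge = "10%"
}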
@@ -1137,6 +1145,15 @@ func upgradeSettingsSchema() *pluginsdk.Schema {
                 Type:     pluginsdk.TypeString,
                 Required: true,
             },
+            "drain_timeout_in_minutes": {
+                Type:     pluginsdk.TypeInt,
+                Optional: true,
+            },
+            "node_soak_duration_in_minutes": {
+                Type:         pluginsdk.TypeInt,
+                Optional:     true,
+                ValidateFunc: validation.IntBetween(0, 30),
+            },
         },
     },
 }
@@ -1152,6 +1169,15 @@ func upgradeSettingsSchema() *pluginsdk.Schema {
                 Optional: true,
                 Default:  "10%",
             },
+            "drain_timeout_in_minutes": {
+                Type:     pluginsdk.TypeInt,
+                Optional: true,
+            },
+            "node_soak_duration_in_minutes": {
+                Type:         pluginsdk.TypeInt,
+                Optional:     true,
+                ValidateFunc: validation.IntBetween(0, 30),
+            },
         },
     },
 }
@@ -1167,6 +1193,14 @@ func upgradeSettingsForDataSourceSchema() *pluginsdk.Schema {
                 Type:     pluginsdk.TypeString,
                 Computed: true,
             },
+            "drain_timeout_in_minutes": {
+                Type:     pluginsdk.TypeInt,
+                Computed: true,
+            },
+            "node_soak_duration_in_minutes": {
+                Type:     pluginsdk.TypeInt,
+                Computed: true,
+            },
         },
     },
 }
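Because these data source attributes are Computed, the new values can simply be read off an existing cluster. A small illustrative read, assuming the data source's existing `agent_pool_profile` attribute layout (the names here are an example, not part of this commit):

data "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  resource_group_name = "example-resources"
}

output "default_pool_drain_timeout" {
  value = data.azurerm_kubernetes_cluster.example.agent_pool_profile[0].upgrade_settings[0].drain_timeout_in_minutes
}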
@@ -1216,31 +1250,42 @@ func expandAgentPoolKubeletConfig(input []interface{}) *agentpools.KubeletConfig
 func expandAgentPoolUpgradeSettings(input []interface{}) *agentpools.AgentPoolUpgradeSettings {
     setting := &agentpools.AgentPoolUpgradeSettings{}
     if len(input) == 0 || input[0] == nil {
-        return setting
+        return nil
     }
 
     v := input[0].(map[string]interface{})
     if maxSurgeRaw := v["max_surge"].(string); maxSurgeRaw != "" {
         setting.MaxSurge = utils.String(maxSurgeRaw)
     }
+    if drainTimeoutInMinutesRaw, ok := v["drain_timeout_in_minutes"].(int); ok {
+        setting.DrainTimeoutInMinutes = pointer.To(int64(drainTimeoutInMinutesRaw))
+    }
+    if nodeSoakDurationInMinutesRaw, ok := v["node_soak_duration_in_minutes"].(int); ok {
+        setting.NodeSoakDurationInMinutes = pointer.To(int64(nodeSoakDurationInMinutesRaw))
+    }
     return setting
 }
 
 func flattenAgentPoolUpgradeSettings(input *agentpools.AgentPoolUpgradeSettings) []interface{} {
-    maxSurge := ""
-    if input != nil && input.MaxSurge != nil {
-        maxSurge = *input.MaxSurge
+    if input == nil {
+        return []interface{}{}
     }
 
-    if maxSurge == "" {
-        return []interface{}{}
+    values := make(map[string]interface{})
+
+    if input.MaxSurge != nil && *input.MaxSurge != "" {
+        values["max_surge"] = *input.MaxSurge
     }
 
-    return []interface{}{
-        map[string]interface{}{
-            "max_surge": maxSurge,
-        },
+    if input.DrainTimeoutInMinutes != nil {
+        values["drain_timeout_in_minutes"] = *input.DrainTimeoutInMinutes
     }
+
+    if input.NodeSoakDurationInMinutes != nil {
+        values["node_soak_duration_in_minutes"] = *input.NodeSoakDurationInMinutes
+    }
+
+    return []interface{}{values}
 }
 
 func expandNodeLabels(input map[string]interface{}) *map[string]string {
internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
@@ -565,29 +565,16 @@ func TestAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) {

     data.ResourceTest(t, r, []acceptance.TestStep{
         {
-            Config: r.upgradeSettingsConfig(data, "2"),
+            Config: r.upgradeSettings(data, 35, 18),
             Check: acceptance.ComposeTestCheckFunc(
                 check.That(data.ResourceName).ExistsInAzure(r),
-                check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
-                check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"),
             ),
         },
         data.ImportStep(),
         {
-            Config: r.upgradeSettingsConfig(data, "4"),
+            Config: r.upgradeSettings(data, 1, 0),
             Check: acceptance.ComposeTestCheckFunc(
                 check.That(data.ResourceName).ExistsInAzure(r),
-                check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
-                check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("4"),
-            ),
-        },
-        data.ImportStep(),
-        {
-            Config: r.upgradeSettingsConfig(data, "10%"),
-            Check: acceptance.ComposeTestCheckFunc(
-                check.That(data.ResourceName).ExistsInAzure(r),
-                check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
-                check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("10%"),
-            ),
-        },
-        data.ImportStep(),
@@ -925,13 +912,6 @@ func TestAccKubernetesClusterNodePool_workloadRuntime(t *testing.T) {
             ),
         },
         data.ImportStep(),
-        {
-            Config: r.workloadRuntime(data, "WasmWasi"),
-            Check: acceptance.ComposeTestCheckFunc(
-                check.That(data.ResourceName).ExistsInAzure(r),
-            ),
-        },
-        data.ImportStep(),
         {
             Config: r.workloadRuntime(data, "KataMshvVmIsolation"),
             Check: acceptance.ComposeTestCheckFunc(
@@ -2035,13 +2015,8 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
 `, r.templateConfig(data))
 }
 
-func (r KubernetesClusterNodePoolResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string {
+func (r KubernetesClusterNodePoolResource) upgradeSettings(data acceptance.TestData, drainTimeout int, nodeSoakDuration int) string {
     template := r.templateConfig(data)
-    if maxSurge != "" {
-        maxSurge = fmt.Sprintf(`upgrade_settings {
-  max_surge = %q
-}`, maxSurge)
-    }
 
     return fmt.Sprintf(`
 provider "azurerm" {
@@ -2055,9 +2030,13 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
   vm_size               = "Standard_DS2_v2"
   node_count            = 3
-  %s
+  upgrade_settings {
+    max_surge                     = "10%%"
+    drain_timeout_in_minutes      = %d
+    node_soak_duration_in_minutes = %d
+  }
 }
-`, template, maxSurge)
+`, template, drainTimeout, nodeSoakDuration)
 }
 
 func (r KubernetesClusterNodePoolResource) virtualNetworkAutomaticConfig(data acceptance.TestData) string {
@@ -2379,7 +2358,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   vm_size             = "Standard_DS2_v2"
   enable_auto_scaling = true
   min_count           = 1
-  max_count           = 1000
+  max_count           = 399
   node_count          = 1
 }
 `, r.templateConfig(data))
(additional changed file; name not shown in this capture)
@@ -1358,7 +1358,7 @@ resource "azurerm_kubernetes_cluster" "test" {
   vm_size             = "Standard_DS2_v2"
   enable_auto_scaling = true
   min_count           = 1
-  max_count           = 1000
+  max_count           = 399
   node_count          = 1
   upgrade_settings {
     max_surge = "10%%"
4 changes: 4 additions & 0 deletions internal/services/containers/kubernetes_cluster_resource.go
@@ -67,6 +67,10 @@ func resourceKubernetesCluster() *pluginsdk.Resource {
         ),
 
         CustomizeDiff: pluginsdk.CustomDiffInSequence(
+            // The behaviour of the API requires this, but this could be removed when https://github.com/Azure/azure-rest-api-specs/issues/27373 has been addressed
+            pluginsdk.ForceNewIfChange("default_node_pool.0.upgrade_settings.0.drain_timeout_in_minutes", func(ctx context.Context, old, new, meta interface{}) bool {
+                return old != 0 && new == 0
+            }),
             // Migration of `identity` to `service_principal` is not allowed, the other way around is
             pluginsdk.ForceNewIfChange("service_principal.0.client_id", func(ctx context.Context, old, new, meta interface{}) bool {
                 return old == "msi" || old == ""
39 changes: 1 addition & 38 deletions internal/services/containers/kubernetes_cluster_resource_test.go
@@ -23,7 +23,7 @@ type KubernetesClusterResource struct{}
 
 var (
     olderKubernetesVersion        = "1.28.5"
-    currentKubernetesVersion      = "1.29.0"
+    currentKubernetesVersion      = "1.29.2"
     olderKubernetesVersionAlias   = "1.28"
     currentKubernetesVersionAlias = "1.29"
 )
@@ -710,43 +710,6 @@ resource "azurerm_kubernetes_cluster" "test" {
 `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion, enabled)
 }
 
-func (r KubernetesClusterResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string {
-    if maxSurge != "" {
-        maxSurge = fmt.Sprintf(`upgrade_settings {
-  max_surge = %q
-}`, maxSurge)
-    }
-
-    return fmt.Sprintf(`
-provider "azurerm" {
-  features {}
-}
-resource "azurerm_resource_group" "test" {
-  name     = "acctestRG-aks-%d"
-  location = "%s"
-}
-resource "azurerm_kubernetes_cluster" "test" {
-  name                = "acctestaks%d"
-  location            = azurerm_resource_group.test.location
-  resource_group_name = azurerm_resource_group.test.name
-  dns_prefix          = "acctestaks%d"
-  default_node_pool {
-    name       = "default"
-    node_count = 1
-    vm_size    = "Standard_DS2_v2"
-    %s
-  }
-  identity {
-    type = "SystemAssigned"
-  }
-}
-`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, maxSurge)
-}
-
 func TestAccResourceKubernetesCluster_roleBasedAccessControlAAD_OlderKubernetesVersion(t *testing.T) {
     data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
     r := KubernetesClusterResource{}
internal/services/containers/kubernetes_cluster_upgrade_resource_test.go
@@ -279,35 +279,57 @@ func TestAccKubernetesCluster_upgradeSettings(t *testing.T) {

     data.ResourceTest(t, r, []acceptance.TestStep{
         {
-            Config: r.upgradeSettingsConfig(data, "2"),
+            Config: r.upgradeSettings(data, 35, 18),
             Check: acceptance.ComposeTestCheckFunc(
                 check.That(data.ResourceName).ExistsInAzure(r),
-                check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"),
-                check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("2"),
             ),
         },
         data.ImportStep(),
         {
-            Config: r.upgradeSettingsConfig(data, "10%"),
+            Config: r.upgradeSettings(data, 1, 0),
             Check: acceptance.ComposeTestCheckFunc(
                 check.That(data.ResourceName).ExistsInAzure(r),
-                check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"),
-                check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("10%"),
-            ),
-        },
-        data.ImportStep(),
-        {
-            Config: r.upgradeSettingsConfig(data, "2"),
-            Check: acceptance.ComposeTestCheckFunc(
-                check.That(data.ResourceName).ExistsInAzure(r),
-                check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"),
-                check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("2"),
             ),
         },
         data.ImportStep(),
     })
 }

+func (r KubernetesClusterResource) upgradeSettings(data acceptance.TestData, drainTimeout int, nodeSoakDuration int) string {
+    return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-aks-%d"
+  location = "%s"
+}
+resource "azurerm_kubernetes_cluster" "test" {
+  name                = "acctestaks%d"
+  location            = azurerm_resource_group.test.location
+  resource_group_name = azurerm_resource_group.test.name
+  dns_prefix          = "acctestaks%d"
+  default_node_pool {
+    name       = "default"
+    node_count = 1
+    vm_size    = "Standard_DS2_v2"
+    upgrade_settings {
+      max_surge                     = "10%%"
+      drain_timeout_in_minutes      = %d
+      node_soak_duration_in_minutes = %d
+    }
+  }
+  identity {
+    type = "SystemAssigned"
+  }
+}
+`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, drainTimeout, nodeSoakDuration)
+}
+
 func (KubernetesClusterResource) upgradeControlPlaneConfig(data acceptance.TestData, controlPlaneVersion string) string {
     return fmt.Sprintf(`
 provider "azurerm" {
(diffs for the remaining changed files were not loaded in this view)
