Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

azurerm_kubernetes_cluster supports os_sku #13284

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,17 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource {
}, false),
},

"os_sku": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
Default: string(containerservice.OSSKUUbuntu),
ValidateFunc: validation.StringInSlice([]string{
string(containerservice.OSSKUUbuntu),
string(containerservice.OSSKUCBLMariner),
}, false),
},

"os_type": {
Type: pluginsdk.TypeString,
Optional: true,
Expand Down Expand Up @@ -330,6 +341,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int
evictionPolicy := d.Get("eviction_policy").(string)
mode := containerservice.AgentPoolMode(d.Get("mode").(string))
osType := d.Get("os_type").(string)
osSku := d.Get("os_sku").(string)
priority := d.Get("priority").(string)
spotMaxPrice := d.Get("spot_max_price").(float64)
t := d.Get("tags").(map[string]interface{})
Expand All @@ -338,6 +350,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int

profile := containerservice.ManagedClusterAgentPoolProfileProperties{
OsType: containerservice.OSType(osType),
OsSKU: containerservice.OSSKU(osSku),
EnableAutoScaling: utils.Bool(enableAutoScaling),
EnableFIPS: utils.Bool(d.Get("fips_enabled").(bool)),
EnableUltraSSD: utils.Bool(d.Get("ultra_ssd_enabled").(bool)),
Expand Down Expand Up @@ -760,6 +773,7 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter
}
d.Set("os_disk_type", osDiskType)
d.Set("os_type", string(props.OsType))
d.Set("os_sku", string(props.OsSKU))
d.Set("pod_subnet_id", props.PodSubnetID)

// not returned from the API if not Spot
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,12 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){
"nodeTaints": testAccKubernetesClusterNodePool_nodeTaints,
"podSubnet": testAccKubernetesClusterNodePool_podSubnet,
"requiresImport": testAccKubernetesClusterNodePool_requiresImport,
"ultraSSD": testAccKubernetesClusterNodePool_ultraSSD,
"spot": testAccKubernetesClusterNodePool_spot,
"osDiskSizeGB": testAccKubernetesClusterNodePool_osDiskSizeGB,
"proximityPlacementGroupId": testAccKubernetesClusterNodePool_proximityPlacementGroupId,
"osDiskType": testAccKubernetesClusterNodePool_osDiskType,
"osSku": testAccKubernetesClusterNodePool_osSku,
"modeSystem": testAccKubernetesClusterNodePool_modeSystem,
"modeUpdate": testAccKubernetesClusterNodePool_modeUpdate,
"upgradeSettings": testAccKubernetesClusterNodePool_upgradeSettings,
Expand Down Expand Up @@ -918,6 +920,26 @@ func testAccKubernetesClusterNodePool_ultraSSD(t *testing.T) {
})
}

// TestAccKubernetesClusterNodePool_osSku is the exported entry point that lets
// this acceptance test be run individually; the shared implementation lives in
// testAccKubernetesClusterNodePool_osSku so it can also run via the batched
// kubernetesNodePoolTests map.
func TestAccKubernetesClusterNodePool_osSku(t *testing.T) {
	checkIfShouldRunTestsIndividually(t)
	testAccKubernetesClusterNodePool_osSku(t)
}

// testAccKubernetesClusterNodePool_osSku provisions a standalone node pool with
// an explicit `os_sku` value, checks it exists in Azure, and then verifies that
// the resource imports cleanly.
func testAccKubernetesClusterNodePool_osSku(t *testing.T) {
	testData := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
	resource := KubernetesClusterNodePoolResource{}

	steps := []acceptance.TestStep{
		{
			Config: resource.osSku(testData),
			Check: acceptance.ComposeTestCheckFunc(
				check.That(testData.ResourceName).ExistsInAzure(resource),
			),
		},
		testData.ImportStep(),
	}

	testData.ResourceTest(t, resource, steps)
}

func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) {
id, err := parse.NodePoolID(state.ID)
if err != nil {
Expand Down Expand Up @@ -2061,3 +2083,35 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, ultraSSDEnabled)
}

// osSku returns an acceptance-test configuration containing a cluster plus a
// standalone node pool which sets the `os_sku` argument explicitly to "Ubuntu".
func (KubernetesClusterNodePoolResource) osSku(data acceptance.TestData) string {
	// Template placeholders, in order: resource group suffix, location,
	// cluster name suffix, dns_prefix suffix.
	template := `
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%d"
location = "%s"
}
resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2s_v3"
}
identity {
type = "SystemAssigned"
}
}
resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_D2s_v3"
os_sku = "Ubuntu"
}
`
	return fmt.Sprintf(template, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger)
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,16 @@ var kubernetesOtherTests = map[string]func(t *testing.T){
"windowsProfileLicense": testAccKubernetesCluster_windowsProfileLicense,
"updateWindowsProfileLicense": TestAccKubernetesCluster_updateWindowsProfileLicense,
"outboundTypeLoadBalancer": testAccKubernetesCluster_outboundTypeLoadBalancer,
"osSku": testAccKubernetesCluster_osSku,
"privateClusterOn": testAccKubernetesCluster_privateClusterOn,
"privateClusterOff": testAccKubernetesCluster_privateClusterOff,
"privateClusterPublicFqdn": testAccKubernetesCluster_privateClusterPublicFqdn,
"privateClusterPrivateDNS": testAccKubernetesCluster_privateClusterOnWithPrivateDNSZone,
"privateClusterPrivateDNSSystem": testAccKubernetesCluster_privateClusterOnWithPrivateDNSZoneSystem,
"privateClusterPrivateDNSAndSP": testAccKubernetesCluster_privateClusterOnWithPrivateDNSZoneAndServicePrincipal,
"privateClusterPrivateDNSSubDomain": testAccKubernetesCluster_privateClusterOnWithPrivateDNSZoneSubDomain,
"upgradeChannel": testAccKubernetesCluster_upgradeChannel,
"ultraSSD": testAccKubernetesCluster_ultraSSD,
}

func TestAccKubernetesCluster_basicAvailabilitySet(t *testing.T) {
Expand Down Expand Up @@ -753,6 +756,26 @@ func testAccKubernetesCluster_privateClusterPublicFqdn(t *testing.T) {
})
}

// TestAccKubernetesCluster_osSku is the exported entry point that lets this
// acceptance test be run individually; the shared implementation lives in
// testAccKubernetesCluster_osSku so it can also run via the batched
// kubernetesOtherTests map.
func TestAccKubernetesCluster_osSku(t *testing.T) {
	checkIfShouldRunTestsIndividually(t)
	testAccKubernetesCluster_osSku(t)
}

// testAccKubernetesCluster_osSku provisions a cluster whose default node pool
// sets `os_sku` explicitly, checks it exists in Azure, and then verifies that
// the resource imports cleanly.
func testAccKubernetesCluster_osSku(t *testing.T) {
	testData := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
	resource := KubernetesClusterResource{}

	steps := []acceptance.TestStep{
		{
			Config: resource.osSku(testData),
			Check: acceptance.ComposeTestCheckFunc(
				check.That(testData.ResourceName).ExistsInAzure(resource),
			),
		},
		testData.ImportStep(),
	}

	testData.ResourceTest(t, resource, steps)
}

func (KubernetesClusterResource) basicAvailabilitySetConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
Expand Down Expand Up @@ -1967,3 +1990,30 @@ resource "azurerm_kubernetes_cluster" "test" {
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, privateClusterPublicFqdnEnabled)
}

// osSku returns an acceptance-test configuration for a cluster whose default
// node pool sets the `os_sku` argument explicitly to "Ubuntu".
func (KubernetesClusterResource) osSku(data acceptance.TestData) string {
	// Template placeholders, in order: resource group suffix, location,
	// cluster name suffix, dns_prefix suffix.
	template := `
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%d"
location = "%s"
}
resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2s_v3"
os_sku = "Ubuntu"
}
identity {
type = "SystemAssigned"
}
}
`
	return fmt.Sprintf(template, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger)
}
16 changes: 16 additions & 0 deletions internal/services/containers/kubernetes_nodepool.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,17 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
}, false),
},

"os_sku": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
Default: string(containerservice.OSSKUUbuntu),
ValidateFunc: validation.StringInSlice([]string{
string(containerservice.OSSKUUbuntu),
string(containerservice.OSSKUCBLMariner),
}, false),
},

"ultra_ssd_enabled": {
Type: pluginsdk.TypeBool,
ForceNew: true,
Expand Down Expand Up @@ -675,6 +686,10 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
profile.OsDiskType = containerservice.OSDiskType(raw["os_disk_type"].(string))
}

if osSku := raw["os_sku"].(string); osSku != "" {
profile.OsSKU = containerservice.OSSKU(osSku)
}

if podSubnetID := raw["pod_subnet_id"].(string); podSubnetID != "" {
profile.PodSubnetID = utils.String(podSubnetID)
}
Expand Down Expand Up @@ -1070,6 +1085,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
"node_taints": []string{},
"os_disk_size_gb": osDiskSizeGB,
"os_disk_type": string(osDiskType),
"os_sku": string(agentPool.OsSKU),
"tags": tags.Flatten(agentPool.Tags),
"type": string(agentPool.Type),
"ultra_ssd_enabled": enableUltraSSD,
Expand Down
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -343,6 +343,8 @@ A `default_node_pool` block supports the following:

* `os_disk_type` - (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.

* `os_sku` - (Optional) Specifies the OS SKU used by the agent pool. Only applicable to Linux node pools; not applicable to Windows node pools. Possible values include: `Ubuntu` and `CBLMariner`. Defaults to `Ubuntu`. Changing this forces a new resource to be created.

* `pod_subnet_id` - (Optional) The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.

-> **NOTE:** This requires that the Preview Feature `Microsoft.ContainerService/PodSubnetPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#register-the-podsubnetpreview-preview-feature) for more information.
Expand Down
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster_node_pool.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,8 @@ The following arguments are supported:

-> **NOTE:** This requires that the Preview Feature `Microsoft.ContainerService/PodSubnetPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#register-the-podsubnetpreview-preview-feature) for more information.

* `os_sku` - (Optional) Specifies the OS SKU used by the agent pool. Only applicable to Linux node pools; not applicable to Windows node pools. Possible values include: `Ubuntu` and `CBLMariner`. Defaults to `Ubuntu`. Changing this forces a new resource to be created.

* `os_type` - (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.

* `priority` - (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
Expand Down