Commit

r/kubernetes_cluster: updating the default node pool using the separate api
tombuildsstuff committed Nov 19, 2019
1 parent e07982a commit a153867
Showing 3 changed files with 72 additions and 31 deletions.
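In short, the default node pool is no longer folded into the Managed Clusters CreateOrUpdate call during updates; it is converted into an AgentPool and sent through the dedicated Agent Pools API. The sketch below illustrates that flow under a few assumptions: the updateDefaultNodePool helper name and the pinned API-version import path are illustrative and not part of this commit, and only a couple of the copied fields are shown.

package example

import (
	"context"
	"fmt"

	// assumed API version; the provider pins a specific containerservice package
	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
)

// updateDefaultNodePool sketches the new flow: take the expanded default node
// pool profile, wrap it in an AgentPool, and update it via the Agent Pools API
// rather than through the Managed Clusters API.
func updateDefaultNodePool(ctx context.Context, client containerservice.AgentPoolsClient, resourceGroup, clusterName string, profiles *[]containerservice.ManagedClusterAgentPoolProfile) error {
	if profiles == nil || len(*profiles) == 0 {
		return fmt.Errorf("no default node pool profile was found")
	}

	// mirrors ConvertDefaultNodePoolToAgentPool: the first profile is the default pool
	defaultPool := (*profiles)[0]
	agentPool := containerservice.AgentPool{
		Name: defaultPool.Name,
		ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
			Count:  defaultPool.Count,
			VMSize: defaultPool.VMSize,
			// ...the remaining profile fields are copied across in the same way
		},
	}

	// the Agent Pools API targets a single pool within the managed cluster
	future, err := client.CreateOrUpdate(ctx, resourceGroup, clusterName, *agentPool.Name, agentPool)
	if err != nil {
		return fmt.Errorf("updating default node pool: %+v", err)
	}

	// block until the long-running operation completes
	return future.WaitForCompletionRef(ctx, client.Client)
}

The commit wires this up by registering an AgentPoolsClient alongside the existing ManagedClustersClient, as the first file below shows.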
5 changes: 5 additions & 0 deletions azurerm/internal/services/containers/client.go
@@ -8,6 +8,7 @@ import (
)

type Client struct {
AgentPoolsClient *containerservice.AgentPoolsClient
KubernetesClustersClient *containerservice.ManagedClustersClient
GroupsClient *containerinstance.ContainerGroupsClient
RegistriesClient *containerregistry.RegistriesClient
@@ -37,7 +38,11 @@ func BuildClient(o *common.ClientOptions) *Client {
KubernetesClustersClient := containerservice.NewManagedClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&KubernetesClustersClient.Client, o.ResourceManagerAuthorizer)

agentPoolsClient := containerservice.NewAgentPoolsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&agentPoolsClient.Client, o.ResourceManagerAuthorizer)

return &Client{
AgentPoolsClient: &agentPoolsClient,
KubernetesClustersClient: &KubernetesClustersClient,
GroupsClient: &GroupsClient,
RegistriesClient: &RegistriesClient,
25 changes: 25 additions & 0 deletions azurerm/internal/services/containers/kubernetes_nodepool.go
@@ -118,6 +118,31 @@ func SchemaDefaultNodePool() *schema.Schema {
}
}

func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterAgentPoolProfile) containerservice.AgentPool {
	defaultCluster := (*input)[0]
	return containerservice.AgentPool{
		Name: defaultCluster.Name,
		ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
			Count:                  defaultCluster.Count,
			VMSize:                 defaultCluster.VMSize,
			OsDiskSizeGB:           defaultCluster.OsDiskSizeGB,
			VnetSubnetID:           defaultCluster.VnetSubnetID,
			MaxPods:                defaultCluster.MaxPods,
			OsType:                 defaultCluster.OsType,
			MaxCount:               defaultCluster.MaxCount,
			MinCount:               defaultCluster.MinCount,
			EnableAutoScaling:      defaultCluster.EnableAutoScaling,
			Type:                   defaultCluster.Type,
			OrchestratorVersion:    defaultCluster.OrchestratorVersion,
			AvailabilityZones:      defaultCluster.AvailabilityZones,
			EnableNodePublicIP:     defaultCluster.EnableNodePublicIP,
			ScaleSetPriority:       defaultCluster.ScaleSetPriority,
			ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy,
			NodeTaints:             defaultCluster.NodeTaints,
		},
	}
}

func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) {
input := d.Get("default_node_pool").([]interface{})
// TODO: in 2.0 make this Required
73 changes: 42 additions & 31 deletions azurerm/resource_arm_kubernetes_cluster.go
@@ -685,7 +685,8 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{}
}

func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient).Containers.KubernetesClustersClient
nodePoolsClient := meta.(*ArmClient).Containers.AgentPoolsClient
clusterClient := meta.(*ArmClient).Containers.KubernetesClustersClient
ctx, cancel := timeouts.ForUpdate(meta.(*ArmClient).StopContext, d)
defer cancel()
tenantId := meta.(*ArmClient).tenantId
@@ -714,19 +715,19 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}
ClientID: utils.String(clientId),
Secret: utils.String(clientSecret),
}
future, err := client.ResetServicePrincipalProfile(ctx, resourceGroup, name, params)
future, err := clusterClient.ResetServicePrincipalProfile(ctx, resourceGroup, name, params)
if err != nil {
return fmt.Errorf("Error updating Service Principal for Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil {
return fmt.Errorf("Error waiting for update of Service Principal for Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
log.Printf("[DEBUG] Updated the Service Principal for Kubernetes Cluster %q (Resource Group %q).", name, resourceGroup)
}

// we need to conditionally update the cluster
existing, err := client.Get(ctx, resourceGroup, name)
existing, err := clusterClient.Get(ctx, resourceGroup, name)
if err != nil {
return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
@@ -744,28 +745,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}
existing.ManagedClusterProperties.AddonProfiles = addonProfiles
}

if d.HasChange("default_node_pool") || d.HasChange("agent_pool_profile") {
updateCluster = true
agentProfiles, err := containers.ExpandDefaultNodePool(d)
if err != nil {
return fmt.Errorf("Error expanding `default_node_pool`: %+v", err)
}

// TODO: remove me in 2.0
if agentProfiles == nil {
agentProfilesRaw := d.Get("agent_pool_profile").([]interface{})
agentProfilesLegacy, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false)
if err != nil {
return err
}

agentProfiles = &agentProfilesLegacy
}

// TODO: switch to updating via the AgentPools client
existing.ManagedClusterProperties.AgentPoolProfiles = agentProfiles
}

if d.HasChange("api_server_authorized_ip_ranges") {
updateCluster = true
apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List()
@@ -815,20 +794,52 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}

if updateCluster {
log.Printf("[DEBUG] Updating the Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup)
future, err := client.CreateOrUpdate(ctx, resourceGroup, name, existing)
future, err := clusterClient.CreateOrUpdate(ctx, resourceGroup, name, existing)
if err != nil {
return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil {
return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
log.Printf("[DEBUG] Updated the Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup)
}

// update the node pool using the separate API
if d.HasChange("default_node_pool") || d.HasChange("agent_pool_profile") {
log.Printf("[DEBUG] Updating of Default Node Pool..")

agentProfiles, err := containers.ExpandDefaultNodePool(d)
if err != nil {
return fmt.Errorf("Error expanding `default_node_pool`: %+v", err)
}

// TODO: remove me in 2.0
if agentProfiles == nil {
agentProfilesRaw := d.Get("agent_pool_profile").([]interface{})
agentProfilesLegacy, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false)
if err != nil {
return err
}

agentProfiles = &agentProfilesLegacy
}

agentProfile := containers.ConvertDefaultNodePoolToAgentPool(agentProfiles)
agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, resourceGroup, name, *agentProfile.Name, agentProfile)
if err != nil {
return fmt.Errorf("Error updating Default Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err)
}

if err := agentPool.WaitForCompletionRef(ctx, nodePoolsClient.Client); err != nil {
return fmt.Errorf("Error waiting for update of Default Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err)
}
log.Printf("[DEBUG] Updated Default Node Pool.")
}

// then roll the version of Kubernetes if necessary
if d.HasChange("kubernetes_version") {
existing, err = client.Get(ctx, resourceGroup, name)
existing, err = clusterClient.Get(ctx, resourceGroup, name)
if err != nil {
return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
@@ -840,12 +851,12 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}
log.Printf("[DEBUG] Upgrading the version of Kubernetes to %q..", kubernetesVersion)
existing.ManagedClusterProperties.KubernetesVersion = utils.String(kubernetesVersion)

future, err := client.CreateOrUpdate(ctx, resourceGroup, name, existing)
future, err := clusterClient.CreateOrUpdate(ctx, resourceGroup, name, existing)
if err != nil {
return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil {
return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
