Skip to content
This repository has been archived by the owner on Oct 24, 2023. It is now read-only.

Commit

Permalink
fix: persist VMSS name in API model (#4051)
Browse files Browse the repository at this point in the history
  • Loading branch information
jackfrancis authored Nov 20, 2020
1 parent cff0a34 commit cc2e826
Show file tree
Hide file tree
Showing 13 changed files with 154 additions and 78 deletions.
16 changes: 10 additions & 6 deletions cmd/addpool.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/Azure/aks-engine/pkg/i18n"
"github.com/Azure/aks-engine/pkg/operations"
"github.com/Azure/go-autorest/autorest/to"
"github.com/leonelquinteros/gotext"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
Expand Down Expand Up @@ -143,6 +144,13 @@ func (apc *addPoolCmd) load() error {
return errors.Wrap(err, "error parsing the agent pool")
}

// Back-compat logic to populate the VMSSName property for clusters built prior to VMSSName being a part of the API model spec
if apc.nodePool.IsVirtualMachineScaleSets() && apc.nodePool.VMSSName == "" {
existingPools := len(apc.containerService.Properties.AgentPoolProfiles)
newIndex := existingPools + 1
apc.nodePool.VMSSName = apc.containerService.Properties.GetAgentVMPrefix(apc.nodePool, newIndex)
}

if apc.containerService.Properties.IsCustomCloudProfile() {
if err = writeCustomCloudProfile(apc.containerService); err != nil {
return errors.Wrap(err, "error writing custom cloud profile")
Expand Down Expand Up @@ -210,12 +218,8 @@ func (apc *addPoolCmd) run(cmd *cobra.Command, args []string) error {
return errors.Wrap(err, "failed to get VMSS list in the resource group")
}
for _, vmss := range vmssListPage.Values() {
segments := strings.Split(*vmss.Name, "-")
if len(segments) == 4 && segments[0] == "k8s" {
vmssName := segments[1]
if apc.nodePool.Name == vmssName {
return errors.New("An agent pool with the given name already exists in the cluster")
}
if apc.nodePool.VMSSName == to.String(vmss.Name) {
return errors.New("A VMSS node pool with the given name already exists in the cluster")
}
}
}
Expand Down
26 changes: 11 additions & 15 deletions cmd/scale.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,11 @@ import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"

"github.com/Azure/aks-engine/pkg/armhelpers/utils"
"github.com/Azure/go-autorest/autorest/to"

"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/armhelpers"
Expand Down Expand Up @@ -226,6 +226,11 @@ func (sc *scaleCmd) load() error {
}
}

// Back-compat logic to populate the VMSSName property for clusters built prior to VMSSName being a part of the API model spec
if sc.agentPool.IsVirtualMachineScaleSets() && sc.agentPool.VMSSName == "" {
sc.agentPool.VMSSName = sc.containerService.Properties.GetAgentVMPrefix(sc.agentPool, sc.agentPoolIndex)
}

//allows to identify VMs in the resource group that belong to this cluster.
sc.nameSuffix = sc.containerService.Properties.GetClusterID()
log.Debugf("Cluster ID used in all agent pools: %s", sc.nameSuffix)
Expand Down Expand Up @@ -406,22 +411,12 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
return errors.Wrap(err, "failed to get VMSS list in the resource group")
}
for _, vmss := range vmssListPage.Values() {
vmssName := *vmss.Name
if sc.agentPool.OSType == api.Windows {
possibleIndex, err := strconv.Atoi(vmssName[len(vmssName)-2:])
if err != nil {
continue
}
if !(sc.containerService.Properties.GetAgentVMPrefix(sc.agentPool, possibleIndex) == vmssName) {
continue
}
winPoolIndex = possibleIndex
vmssName := to.String(vmss.Name)
if sc.agentPool.VMSSName == vmssName {
log.Infof("found VMSS %s in resource group %s that correlates with node pool %s", vmssName, sc.resourceGroupName, sc.agentPoolToScale)
} else {
if !sc.vmInAgentPool(vmssName, vmss.Tags) {
continue
}
continue
}
log.Infof("found VMSS %s in resource group %s that correlates with node pool %s", vmssName, sc.resourceGroupName, sc.agentPoolToScale)

if vmss.Sku != nil {
currentNodeCount = int(*vmss.Sku.Capacity)
Expand All @@ -441,6 +436,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {

currentNodeCount = int(*vmss.Sku.Capacity)
highestUsedIndex = 0
break
}
}
}
Expand Down
24 changes: 11 additions & 13 deletions cmd/update.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,13 @@ import (
"context"
"fmt"
"os"
"strconv"

"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/Azure/aks-engine/pkg/engine"
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/Azure/aks-engine/pkg/i18n"
"github.com/Azure/go-autorest/autorest/to"
"github.com/leonelquinteros/gotext"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
Expand Down Expand Up @@ -170,6 +170,11 @@ func (uc *updateCmd) load() error {
return errors.Errorf("aks-engine node pool update requires a VMSS node pool, %s is backed by a VM Availability Set", uc.agentPoolToUpdate)
}

// Back-compat logic to populate the VMSSName property for clusters built prior to VMSSName being a part of the API model spec
if uc.agentPool.IsVirtualMachineScaleSets() && uc.agentPool.VMSSName == "" {
uc.agentPool.VMSSName = uc.containerService.Properties.GetAgentVMPrefix(uc.agentPool, uc.agentPoolIndex)
}

//allows to identify VMs in the resource group that belong to this cluster.
uc.nameSuffix = uc.containerService.Properties.GetClusterID()
log.Debugf("Cluster ID used in all agent pools: %s", uc.nameSuffix)
Expand Down Expand Up @@ -227,24 +232,17 @@ func (uc *updateCmd) run(cmd *cobra.Command, args []string) error {
return errors.Wrap(err, "failed to get VMSS list in the resource group")
}
for _, vmss := range vmssListPage.Values() {
vmssName := *vmss.Name
if sc.agentPool.OSType == api.Windows {
possibleIndex, nameMungingErr := strconv.Atoi(vmssName[len(vmssName)-2:])
if nameMungingErr != nil {
continue
}
if !(sc.containerService.Properties.GetAgentVMPrefix(sc.agentPool, possibleIndex) == vmssName) {
continue
}
vmssName := to.String(vmss.Name)
if sc.agentPool.VMSSName == vmssName {
log.Infof("found VMSS %s in resource group %s that correlates with node pool %s", vmssName, sc.resourceGroupName, sc.agentPoolToScale)
} else {
if !sc.vmInAgentPool(vmssName, vmss.Tags) {
continue
}
continue
}

if vmss.Sku != nil {
sc.newDesiredAgentCount = int(*vmss.Sku.Capacity)
uc.agentPool.Count = sc.newDesiredAgentCount
break
} else {
return errors.Wrap(err, fmt.Sprintf("failed to find VMSS matching node pool %s in resource group %s", sc.agentPoolToScale, sc.resourceGroupName))
}
Expand Down
1 change: 1 addition & 0 deletions pkg/api/converterfromapi.go
Original file line number Diff line number Diff line change
Expand Up @@ -608,6 +608,7 @@ func convertAgentPoolProfileToVLabs(api *AgentPoolProfile, p *vlabs.AgentPoolPro
}
p.OSDiskCachingType = api.OSDiskCachingType
p.DataDiskCachingType = api.DataDiskCachingType
p.VMSSName = api.VMSSName
}

func convertServicePrincipalProfileToVLabs(api *ServicePrincipalProfile, v *vlabs.ServicePrincipalProfile) {
Expand Down
1 change: 1 addition & 0 deletions pkg/api/convertertoapi.go
Original file line number Diff line number Diff line change
Expand Up @@ -647,6 +647,7 @@ func convertVLabsAgentPoolProfile(vlabs *vlabs.AgentPoolProfile, api *AgentPoolP
}
api.OSDiskCachingType = vlabs.OSDiskCachingType
api.DataDiskCachingType = vlabs.DataDiskCachingType
api.VMSSName = vlabs.VMSSName
}

func convertVLabsKeyVaultSecrets(vlabs *vlabs.KeyVaultSecrets, api *KeyVaultSecrets) {
Expand Down
4 changes: 3 additions & 1 deletion pkg/api/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -695,7 +695,7 @@ func (p *Properties) setMasterProfileDefaults() {
}

func (p *Properties) setAgentProfileDefaults(isUpgrade, isScale bool) {
for _, profile := range p.AgentPoolProfiles {
for i, profile := range p.AgentPoolProfiles {
if profile.AvailabilityProfile == "" {
profile.AvailabilityProfile = VirtualMachineScaleSets
}
Expand All @@ -712,6 +712,8 @@ func (p *Properties) setAgentProfileDefaults(isUpgrade, isScale bool) {
if profile.VMSSOverProvisioningEnabled == nil {
profile.VMSSOverProvisioningEnabled = to.BoolPtr(DefaultVMSSOverProvisioningEnabled && !isUpgrade && !isScale)
}

profile.VMSSName = p.GetAgentVMPrefix(profile, i)
}
// set default OSType to Linux
if profile.OSType == "" {
Expand Down
20 changes: 20 additions & 0 deletions pkg/api/defaults_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -705,6 +705,26 @@ func TestVMSSOverProvisioning(t *testing.T) {
}
}

// TestVMSSDefaultsVMSSName verifies that SetPropertiesDefaults populates the
// VMSSName field of a VirtualMachineScaleSets-backed agent pool profile with
// the value produced by GetAgentVMPrefix for that pool's index.
func TestVMSSDefaultsVMSSName(t *testing.T) {
	const poolName = "foo"
	mockCS := getMockBaseContainerService("1.18.10")
	// AgentPoolProfiles holds pointers, so pool aliases the profile in mockCS.
	pool := mockCS.Properties.AgentPoolProfiles[0]
	pool.AvailabilityProfile = VirtualMachineScaleSets
	pool.Name = poolName
	// Compute the expected name before defaults are applied (index 0 == first pool).
	expectedVMSSName := mockCS.Properties.GetAgentVMPrefix(pool, 0)

	if _, err := mockCS.SetPropertiesDefaults(PropertiesDefaultsParams{
		IsScale:    false,
		IsUpgrade:  false,
		PkiKeySize: helpers.DefaultPkiKeySize,
	}); err != nil {
		t.Errorf("expected no error from SetPropertiesDefaults, instead got %s", err)
	}

	if got := pool.VMSSName; got != expectedVMSSName {
		t.Errorf("expected VMSSName to be %s, instead got %s", expectedVMSSName, got)
	}
}

func TestAuditDEnabled(t *testing.T) {
mockCS := getMockBaseContainerService("1.12.7")
isUpgrade := true
Expand Down
5 changes: 5 additions & 0 deletions pkg/api/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -668,6 +668,8 @@ type AgentPoolProfile struct {
ProximityPlacementGroupID string `json:"proximityPlacementGroupID,omitempty"`
OSDiskCachingType string `json:"osDiskCachingType,omitempty"`
DataDiskCachingType string `json:"dataDiskCachingType,omitempty"`
// VMSSName is a read-only field; its value will be computed during template generation
VMSSName string `json:"vmssName,omitempty"`
}

// AgentPoolProfileRole represents an agent role
Expand Down Expand Up @@ -927,6 +929,9 @@ func (p *Properties) GetAgentPoolIndexByName(name string) int {

// GetAgentVMPrefix returns the VM prefix for an agentpool.
func (p *Properties) GetAgentVMPrefix(a *AgentPoolProfile, index int) string {
if a.IsVirtualMachineScaleSets() && a.VMSSName != "" {
return a.VMSSName
}
nameSuffix := p.GetClusterID()
vmPrefix := ""
if index != -1 {
Expand Down
32 changes: 32 additions & 0 deletions pkg/api/types_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5510,6 +5510,38 @@ func TestGetAgentVMPrefix(t *testing.T) {
},
expectedVMPrefix: "k8s-agentpool-30819786-vmss",
},
{
name: "Linux VMSS agent pool profile with VMSSName",
profile: &AgentPoolProfile{
Name: "agentpool",
VMSize: "Standard_D2_v2",
Count: 1,
AvailabilityProfile: "VirtualMachineScaleSets",
OSType: Linux,
VMSSName: "foo",
},
properties: &Properties{
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
},
MasterProfile: &MasterProfile{
Count: 1,
DNSPrefix: "myprefix1",
VMSize: "Standard_DS2_v2",
},
AgentPoolProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
VMSize: "Standard_D2_v2",
Count: 1,
AvailabilityProfile: "VirtualMachineScaleSets",
OSType: Linux,
VMSSName: "foo",
},
},
},
expectedVMPrefix: "foo",
},
{
name: "Windows agent pool profile",
profile: &AgentPoolProfile{
Expand Down
2 changes: 2 additions & 0 deletions pkg/api/vlabs/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -543,6 +543,8 @@ type AgentPoolProfile struct {
ProximityPlacementGroupID string `json:"proximityPlacementGroupID,omitempty"`
OSDiskCachingType string `json:"osDiskCachingType,omitempty"`
DataDiskCachingType string `json:"dataDiskCachingType,omitempty"`
// VMSSName is a read-only field; its value will be computed during template generation
VMSSName string `json:"vmssName,omitempty"`
}

// AgentPoolProfileRole represents an agent role
Expand Down
43 changes: 20 additions & 23 deletions test/e2e/cluster.sh
Original file line number Diff line number Diff line change
Expand Up @@ -43,12 +43,6 @@ if [ "$STABILITY_TIMEOUT_SECONDS" == "" ]; then
STABILITY_TIMEOUT_SECONDS=5
fi

if [ -n "$ADD_NODE_POOL_INPUT" ]; then
cat > ${TMP_DIR}/addpool-input.json <<END
${ADD_NODE_POOL_INPUT}
END
fi

if [ -n "$PRIVATE_SSH_KEY_FILE" ]; then
PRIVATE_SSH_KEY_FILE=$(realpath --relative-to=$(pwd) ${PRIVATE_SSH_KEY_FILE})
fi
Expand Down Expand Up @@ -257,23 +251,26 @@ else
fi

if [ -n "$ADD_NODE_POOL_INPUT" ]; then
docker run --rm \
-v $(pwd):${WORK_DIR} \
-v /etc/ssl/certs:/etc/ssl/certs \
-w ${WORK_DIR} \
-e RESOURCE_GROUP=$RESOURCE_GROUP \
-e REGION=$REGION \
${DEV_IMAGE} \
./bin/aks-engine addpool \
--azure-env ${AZURE_ENV} \
--subscription-id ${AZURE_SUBSCRIPTION_ID} \
--api-model _output/$RESOURCE_GROUP/apimodel.json \
--node-pool ${TMP_BASENAME}/addpool-input.json \
--location $REGION \
--resource-group $RESOURCE_GROUP \
--auth-method client_secret \
--client-id ${AZURE_CLIENT_ID} \
--client-secret ${AZURE_CLIENT_SECRET} || exit 1
for pool in $(echo ${ADD_NODE_POOL_INPUT} | jq -c '.[]'); do
echo $pool > ${TMP_DIR}/addpool-input.json
docker run --rm \
-v $(pwd):${WORK_DIR} \
-v /etc/ssl/certs:/etc/ssl/certs \
-w ${WORK_DIR} \
-e RESOURCE_GROUP=$RESOURCE_GROUP \
-e REGION=$REGION \
${DEV_IMAGE} \
./bin/aks-engine addpool \
--azure-env ${AZURE_ENV} \
--subscription-id ${AZURE_SUBSCRIPTION_ID} \
--api-model _output/$RESOURCE_GROUP/apimodel.json \
--node-pool ${TMP_BASENAME}/addpool-input.json \
--location $REGION \
--resource-group $RESOURCE_GROUP \
--auth-method client_secret \
--client-id ${AZURE_CLIENT_ID} \
--client-secret ${AZURE_CLIENT_SECRET} || exit 1
done

CLEANUP_AFTER_ADD_NODE_POOL=${CLEANUP_ON_EXIT}
if [ "${UPGRADE_CLUSTER}" = "true" ] || [ "${SCALE_CLUSTER}" = "true" ]; then
Expand Down
25 changes: 17 additions & 8 deletions test/e2e/test_cluster_configs/availabilityset.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,14 @@
},
"agentPoolProfiles": [
{
"name": "agent1",
"name": "poollinux1",
"count": 1,
"vmSize": "Standard_D2_v3",
"osType": "Linux",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "poolwin",
"name": "poolwin1",
"count": 1,
"vmSize": "Standard_D2_v3",
"osType": "Windows",
Expand All @@ -46,10 +46,19 @@
}
}
},
"addNodePool": {
"name": "pooladded",
"count": 1,
"vmSize": "Standard_D2_v3",
"availabilityProfile": "AvailabilitySet"
}
"addNodePool": [
{
"name": "poollinux2",
"count": 1,
"vmSize": "Standard_D2_v3",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "poolwin2",
"count": 1,
"vmSize": "Standard_D2_v3",
"osType": "Windows",
"availabilityProfile": "AvailabilitySet"
}
]
}
Loading

0 comments on commit cc2e826

Please sign in to comment.