fix: remove GetDisk operation in AttachDisk #678

Merged
merged 1 commit into from Jun 27, 2021
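Summary: this change moves the GetDisk call out of AttachDisk. The disk is now passed in as a *compute.Disk parameter, and it may be nil when GetDisk is throttled, in which case AttachDisk skips the disk-state checks instead of failing the attach. A minimal sketch of the resulting call pattern on the caller's side, assuming resourceGroup and diskName are already resolved (illustrative fragment, not code from this PR):

	// Resolve the disk once on the caller's side. Per the new contract,
	// disk may be nil when GetDisk is throttled; AttachDisk then skips
	// the disk-state checks rather than failing the attach.
	var disk *compute.Disk
	if result, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName); rerr == nil {
		disk = &result
	}

	// New signature: the disk pointer replaces the internal GetDisk lookup.
	lun, err := c.AttachDisk(true, diskName, diskURI, nodeName,
		compute.CachingTypesReadOnly, disk)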
39 changes: 10 additions & 29 deletions pkg/provider/azure_controller_common.go
@@ -37,7 +37,6 @@ import (

azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)

const (
@@ -145,25 +144,14 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.Azu

// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI.
// return (lun, error)
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) {
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName,
cachingMode compute.CachingTypes, disk *compute.Disk) (int32, error) {
diskEncryptionSetID := ""
writeAcceleratorEnabled := false

if isManagedDisk {
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
return -1, err
}

ctx, cancel := getContextWithCancel()
defer cancel()

disk, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
if rerr != nil {
return -1, rerr.Error()
}

// there is possibility that disk is nil when GetDisk is throttled
// don't check disk state when GetDisk is throttled
if disk != nil {
if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
if err != nil {
@@ -225,8 +213,8 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
writeAcceleratorEnabled: writeAcceleratorEnabled,
}
node := strings.ToLower(string(nodeName))
disk := strings.ToLower(diskURI)
if err := c.insertAttachDiskRequest(disk, node, &options); err != nil {
diskuri := strings.ToLower(diskURI)
if err := c.insertAttachDiskRequest(diskuri, node, &options); err != nil {
return -1, err
}

@@ -237,7 +225,7 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
return -1, err
}

lun, err := c.SetDiskLun(nodeName, disk, diskMap)
lun, err := c.SetDiskLun(nodeName, diskuri, diskMap)
if err != nil {
return -1, err
}
@@ -341,18 +329,11 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
err, diskURI)
return nil
}
if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %w", diskName, diskURI, err)
err = vmset.DetachDisk(nodeName, diskMap)
}
klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
return err
}
}

if err != nil {
klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
return err
}

klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
return nil
}
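In the same file, DetachDisk drops the retriable-error backoff branch (the second vmset.DetachDisk attempt) and the duplicated error log below it. A sketch of the resulting error path, reconstructed from the diff context above (surrounding code elided):

	// After this change: log the detach failure once and return it; no
	// in-place retry via retry.IsErrorRetriable and CloudProviderBackoff.
	if err != nil {
		klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
		return err
	}
	klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
	return nil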
74 changes: 34 additions & 40 deletions pkg/provider/azure_controller_common_test.go
@@ -52,7 +52,8 @@ func TestCommonAttachDisk(t *testing.T) {
testTags[WriteAcceleratorEnabled] = to.StringPtr("true")
testCases := []struct {
desc string
existedDisk compute.Disk
diskName string
existedDisk *compute.Disk
nodeName types.NodeName
vmList map[string]string
isDataDisksFull bool
@@ -61,10 +62,20 @@ func TestCommonAttachDisk(t *testing.T) {
expectedErr bool
expectedLun int32
}{
{
desc: "correct LUN and no error shall be returned if disk is nil",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
diskName: "disk-name",
existedDisk: nil,
expectedLun: 1,
expectedErr: false,
},
{
desc: "LUN -1 and error shall be returned if there's no such instance corresponding to given nodeName",
nodeName: "vm1",
existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name")},
expectedLun: -1,
expectedErr: true,
},
@@ -73,15 +84,17 @@ func TestCommonAttachDisk(t *testing.T) {
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
isDataDisksFull: true,
existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name")},
expectedLun: -1,
expectedErr: true,
},
{
desc: "correct LUN and no error shall be returned if everything is good",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
existedDisk: compute.Disk{Name: to.StringPtr("disk-name"),
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"),
DiskProperties: &compute.DiskProperties{
Encryption: &compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey},
DiskSizeGB: to.Int32Ptr(4096),
@@ -95,7 +108,8 @@ func TestCommonAttachDisk(t *testing.T) {
desc: "an error shall be returned if disk state is not Unattached",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
existedDisk: compute.Disk{Name: to.StringPtr("disk-name"),
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"),
DiskProperties: &compute.DiskProperties{
Encryption: &compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey},
DiskSizeGB: to.Int32Ptr(4096),
@@ -105,36 +119,21 @@ func TestCommonAttachDisk(t *testing.T) {
expectedLun: -1,
expectedErr: true,
},
{
desc: "an error shall be returned if there's invalid disk uri",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
isBadDiskURI: true,
existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
expectedLun: -1,
expectedErr: true,
},
{
desc: "an error shall be returned if attach an already attached disk with good ManagedBy instance id",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
existedDisk: compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr(goodInstanceID), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr(goodInstanceID), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
expectedLun: -1,
expectedErr: true,
},
{
desc: "an error shall be returned if attach an already attached disk with bad ManagedBy instance id",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
existedDisk: compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr("test"), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
expectedLun: -1,
expectedErr: true,
},
{
desc: "an error shall be returned if there's no matching disk",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
existedDisk: compute.Disk{Name: to.StringPtr("disk-name-1")},
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr("test"), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
expectedLun: -1,
expectedErr: true,
},
@@ -151,10 +150,10 @@ func TestCommonAttachDisk(t *testing.T) {
lockMap: newLockMap(),
}
diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName)
if test.isBadDiskURI {
diskURI = fmt.Sprintf("/baduri/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName)
}
expectedVMs := setTestVirtualMachines(testCloud, test.vmList, test.isDataDisksFull)
if test.isDiskUsed {
@@ -170,11 +169,7 @@ func TestCommonAttachDisk(t *testing.T) {
}
mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "disk-name").Return(test.existedDisk, nil).AnyTimes()
mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("disk-name")).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()

lun, err := common.AttachDisk(true, "", diskURI, test.nodeName, compute.CachingTypesReadOnly)
lun, err := common.AttachDisk(true, "", diskURI, test.nodeName, compute.CachingTypesReadOnly, test.existedDisk)
assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
}
@@ -194,7 +189,8 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
isManagedDisk bool
isDataDisksFull bool
expectedErr bool
existedDisk compute.Disk
diskName string
existedDisk *compute.Disk
expectedLun int32
}{
{
@@ -204,7 +200,8 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
isVMSS: false,
isManagedBy: false,
isManagedDisk: false,
existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name")},
expectedLun: -1,
expectedErr: true,
},
Expand All @@ -215,7 +212,8 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
isVMSS: true,
isManagedBy: false,
isManagedDisk: false,
existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
diskName: "disk-name",
existedDisk: &compute.Disk{Name: to.StringPtr("disk-name")},
expectedLun: -1,
expectedErr: true,
},
@@ -254,7 +252,7 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
lockMap: newLockMap(),
}
diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName)
if !test.isVMSS {
expectedVMs := setTestVirtualMachines(testCloud, test.vmList, test.isDataDisksFull)
mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
@@ -265,13 +263,9 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
}
mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "disk-name").Return(test.existedDisk, nil).AnyTimes()
mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("disk-name")).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
}

lun, err := common.AttachDisk(test.isManagedDisk, "test", diskURI, test.nodeName, compute.CachingTypesReadOnly)
lun, err := common.AttachDisk(test.isManagedDisk, "test", diskURI, test.nodeName, compute.CachingTypesReadOnly, test.existedDisk)
assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
}