diff --git a/pkg/provider/azure_controller_common.go b/pkg/provider/azure_controller_common.go
index 8c0142bfcd..154dbed1db 100644
--- a/pkg/provider/azure_controller_common.go
+++ b/pkg/provider/azure_controller_common.go
@@ -37,7 +37,6 @@ import (
 
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
-	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
 )
 
 const (
@@ -145,25 +144,14 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.Azu
 
 // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI.
 // return (lun, error)
-func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) {
+func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName,
+	cachingMode compute.CachingTypes, disk *compute.Disk) (int32, error) {
 	diskEncryptionSetID := ""
 	writeAcceleratorEnabled := false
 
-	if isManagedDisk {
-		diskName := path.Base(diskURI)
-		resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
-		if err != nil {
-			return -1, err
-		}
-
-		ctx, cancel := getContextWithCancel()
-		defer cancel()
-
-		disk, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
-		if rerr != nil {
-			return -1, rerr.Error()
-		}
-
+	// there is possibility that disk is nil when GetDisk is throttled
+	// don't check disk state when GetDisk is throttled
+	if disk != nil {
 		if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
 			vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
 			if err != nil {
@@ -225,8 +213,8 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 		writeAcceleratorEnabled: writeAcceleratorEnabled,
 	}
 	node := strings.ToLower(string(nodeName))
-	disk := strings.ToLower(diskURI)
-	if err := c.insertAttachDiskRequest(disk, node, &options); err != nil {
+	diskuri := strings.ToLower(diskURI)
+	if err := c.insertAttachDiskRequest(diskuri, node, &options); err != nil {
 		return -1, err
 	}
 
@@ -237,7 +225,7 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 		return -1, err
 	}
 
-	lun, err := c.SetDiskLun(nodeName, disk, diskMap)
+	lun, err := c.SetDiskLun(nodeName, diskuri, diskMap)
 	if err != nil {
 		return -1, err
 	}
@@ -341,18 +329,11 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
 					err, diskURI)
 				return nil
 			}
-			if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
-				klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %w", diskName, diskURI, err)
-				err = vmset.DetachDisk(nodeName, diskMap)
-			}
+			klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
+			return err
 		}
 	}
 
-	if err != nil {
-		klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
-		return err
-	}
-
 	klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
 	return nil
 }
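Note: AttachDisk no longer issues the DisksClient.Get call itself; the caller is now expected to fetch the disk once and pass it through, with nil signalling that GetDisk failed (for example when throttled) and that the disk-state checks should be skipped. A minimal caller-side sketch, reusing only the helpers visible in the removed hunk above (getResourceGroupFromDiskURI, getContextWithCancel, c.cloud.DisksClient.Get, path.Base); the actual call sites live outside this diff:

    // Fetch the disk up front; on a failed Get (for example when throttled),
    // leave disk nil so AttachDisk skips the ManagedBy/disk-state checks.
    var disk *compute.Disk
    resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
    if err != nil {
        return -1, err
    }
    ctx, cancel := getContextWithCancel()
    defer cancel()
    if d, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, path.Base(diskURI)); rerr == nil {
        disk = &d
    }
    return c.AttachDisk(true, diskName, diskURI, nodeName, compute.CachingTypesReadOnly, disk)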
diff --git a/pkg/provider/azure_controller_common_test.go b/pkg/provider/azure_controller_common_test.go
index 7344a340d8..72f7d4b783 100644
--- a/pkg/provider/azure_controller_common_test.go
+++ b/pkg/provider/azure_controller_common_test.go
@@ -52,7 +52,8 @@ func TestCommonAttachDisk(t *testing.T) {
 	testTags[WriteAcceleratorEnabled] = to.StringPtr("true")
 	testCases := []struct {
 		desc            string
-		existedDisk     compute.Disk
+		diskName        string
+		existedDisk     *compute.Disk
 		nodeName        types.NodeName
 		vmList          map[string]string
 		isDataDisksFull bool
@@ -61,10 +62,20 @@ func TestCommonAttachDisk(t *testing.T) {
 		expectedErr     bool
 		expectedLun     int32
 	}{
+		{
+			desc:        "correct LUN and no error shall be returned if disk is nil",
+			vmList:      map[string]string{"vm1": "PowerState/Running"},
+			nodeName:    "vm1",
+			diskName:    "disk-name",
+			existedDisk: nil,
+			expectedLun: 1,
+			expectedErr: false,
+		},
 		{
 			desc:        "LUN -1 and error shall be returned if there's no such instance corresponding to given nodeName",
 			nodeName:    "vm1",
-			existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
+			diskName:    "disk-name",
+			existedDisk: &compute.Disk{Name: to.StringPtr("disk-name")},
 			expectedLun: -1,
 			expectedErr: true,
 		},
@@ -73,7 +84,8 @@ func TestCommonAttachDisk(t *testing.T) {
 			vmList:          map[string]string{"vm1": "PowerState/Running"},
 			nodeName:        "vm1",
 			isDataDisksFull: true,
-			existedDisk:     compute.Disk{Name: to.StringPtr("disk-name")},
+			diskName:        "disk-name",
+			existedDisk:     &compute.Disk{Name: to.StringPtr("disk-name")},
 			expectedLun:     -1,
 			expectedErr:     true,
 		},
@@ -81,7 +93,8 @@ func TestCommonAttachDisk(t *testing.T) {
 			desc:        "correct LUN and no error shall be returned if everything is good",
 			vmList:      map[string]string{"vm1": "PowerState/Running"},
 			nodeName:    "vm1",
-			existedDisk: compute.Disk{Name: to.StringPtr("disk-name"),
+			diskName:    "disk-name",
+			existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"),
 				DiskProperties: &compute.DiskProperties{
 					Encryption: &compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey},
 					DiskSizeGB: to.Int32Ptr(4096),
@@ -95,7 +108,8 @@ func TestCommonAttachDisk(t *testing.T) {
 			desc:        "an error shall be returned if disk state is not Unattached",
 			vmList:      map[string]string{"vm1": "PowerState/Running"},
 			nodeName:    "vm1",
-			existedDisk: compute.Disk{Name: to.StringPtr("disk-name"),
+			diskName:    "disk-name",
+			existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"),
 				DiskProperties: &compute.DiskProperties{
 					Encryption: &compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionTypeEncryptionAtRestWithCustomerKey},
 					DiskSizeGB: to.Int32Ptr(4096),
@@ -105,20 +119,12 @@ func TestCommonAttachDisk(t *testing.T) {
 			expectedLun: -1,
 			expectedErr: true,
 		},
-		{
-			desc:         "an error shall be returned if there's invalid disk uri",
-			vmList:       map[string]string{"vm1": "PowerState/Running"},
-			nodeName:     "vm1",
-			isBadDiskURI: true,
-			existedDisk:  compute.Disk{Name: to.StringPtr("disk-name")},
-			expectedLun:  -1,
-			expectedErr:  true,
-		},
 		{
 			desc:        "an error shall be returned if attach an already attached disk with good ManagedBy instance id",
 			vmList:      map[string]string{"vm1": "PowerState/Running"},
 			nodeName:    "vm1",
-			existedDisk: compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr(goodInstanceID), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
+			diskName:    "disk-name",
+			existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr(goodInstanceID), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
 			expectedLun: -1,
 			expectedErr: true,
 		},
@@ -126,15 +132,8 @@ func TestCommonAttachDisk(t *testing.T) {
 			desc:        "an error shall be returned if attach an already attached disk with bad ManagedBy instance id",
 			vmList:      map[string]string{"vm1": "PowerState/Running"},
 			nodeName:    "vm1",
-			existedDisk: compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr("test"), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
-			expectedLun: -1,
-			expectedErr: true,
-		},
-		{
-			desc:        "an error shall be returned if there's no matching disk",
-			vmList:      map[string]string{"vm1": "PowerState/Running"},
-			nodeName:    "vm1",
-			existedDisk: compute.Disk{Name: to.StringPtr("disk-name-1")},
+			diskName:    "disk-name",
+			existedDisk: &compute.Disk{Name: to.StringPtr("disk-name"), ManagedBy: to.StringPtr("test"), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
 			expectedLun: -1,
 			expectedErr: true,
 		},
@@ -151,10 +150,10 @@ func TestCommonAttachDisk(t *testing.T) {
 			lockMap: newLockMap(),
 		}
 		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
-			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
+			testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName)
 		if test.isBadDiskURI {
 			diskURI = fmt.Sprintf("/baduri/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
-				testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
+				testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName)
 		}
 		expectedVMs := setTestVirtualMachines(testCloud, test.vmList, test.isDataDisksFull)
 		if test.isDiskUsed {
@@ -170,11 +169,7 @@ func TestCommonAttachDisk(t *testing.T) {
 		}
 		mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 
-		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
-		mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "disk-name").Return(test.existedDisk, nil).AnyTimes()
-		mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("disk-name")).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
-
-		lun, err := common.AttachDisk(true, "", diskURI, test.nodeName, compute.CachingTypesReadOnly)
+		lun, err := common.AttachDisk(true, "", diskURI, test.nodeName, compute.CachingTypesReadOnly, test.existedDisk)
 		assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
 		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 	}
@@ -194,7 +189,8 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
 		isManagedDisk   bool
 		isDataDisksFull bool
 		expectedErr     bool
-		existedDisk     compute.Disk
+		diskName        string
+		existedDisk     *compute.Disk
 		expectedLun     int32
 	}{
 		{
@@ -204,7 +200,8 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
 			isVMSS:        false,
 			isManagedBy:   false,
 			isManagedDisk: false,
-			existedDisk:   compute.Disk{Name: to.StringPtr("disk-name")},
+			diskName:      "disk-name",
+			existedDisk:   &compute.Disk{Name: to.StringPtr("disk-name")},
 			expectedLun:   -1,
 			expectedErr:   true,
 		},
@@ -215,7 +212,8 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
 			isVMSS:        true,
 			isManagedBy:   false,
 			isManagedDisk: false,
-			existedDisk:   compute.Disk{Name: to.StringPtr("disk-name")},
+			diskName:      "disk-name",
+			existedDisk:   &compute.Disk{Name: to.StringPtr("disk-name")},
 			expectedLun:   -1,
 			expectedErr:   true,
 		},
@@ -254,7 +252,7 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
 			lockMap: newLockMap(),
 		}
 		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
-			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
+			testCloud.SubscriptionID, testCloud.ResourceGroup, test.diskName)
 		if !test.isVMSS {
 			expectedVMs := setTestVirtualMachines(testCloud, test.vmList, test.isDataDisksFull)
 			mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
@@ -265,13 +263,9 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
 				mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 			}
 			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
-
-			mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
-			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "disk-name").Return(test.existedDisk, nil).AnyTimes()
-			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("disk-name")).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 		}
 
-		lun, err := common.AttachDisk(test.isManagedDisk, "test", diskURI, test.nodeName, compute.CachingTypesReadOnly)
+		lun, err := common.AttachDisk(test.isManagedDisk, "test", diskURI, test.nodeName, compute.CachingTypesReadOnly, test.existedDisk)
 		assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
 		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 	}
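Note: with the retry.IsErrorRetriable/CloudProviderBackoff branch removed, DetachDisk no longer retries retriable failures in-line; it logs and returns the first error from vmset.DetachDisk. A hypothetical sketch of the caller-side consequence (the reconcile-style caller is an assumption, not part of this diff):

    // With the in-function backoff removed, a retriable detach error now
    // surfaces directly; the caller's reconcile/retry loop owns the backoff.
    if err := common.DetachDisk(diskName, diskURI, nodeName); err != nil {
        klog.Warningf("azureDisk - detach disk(%s, %s) failed, leaving retry to the caller: %v", diskName, diskURI, err)
        return err
    }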