diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml
index b0d4b188a2..9445d8de77 100644
--- a/.github/workflows/static.yaml
+++ b/.github/workflows/static.yaml
@@ -15,4 +15,5 @@ jobs:
       - name: Run linter
         uses: golangci/golangci-lint-action@v4
         with:
-          version: v1.54
+          version: v1.60
+          args: --timeout 10m
\ No newline at end of file
diff --git a/pkg/azuredisk/azure_controller_common.go b/pkg/azuredisk/azure_controller_common.go
index 0600c63822..acf4654a79 100644
--- a/pkg/azuredisk/azure_controller_common.go
+++ b/pkg/azuredisk/azure_controller_common.go
@@ -54,7 +54,7 @@ const (
 	maxLUN                 = 64 // max number of LUNs per VM
 	errStatusCode400       = "statuscode=400"
 	errInvalidParameter    = `code="invalidparameter"`
-	errTargetInstanceIds   = `target="instanceids"`
+	errTargetInstanceIDs   = `target="instanceids"`
 	sourceSnapshot         = "snapshot"
 	sourceVolume           = "volume"
 	attachDiskMapKeySuffix = "attachdiskmap"
@@ -678,5 +678,5 @@ func isInstanceNotFoundError(err error) bool {
 	if strings.Contains(errMsg, strings.ToLower(consts.VmssVMNotActiveErrorMessage)) {
 		return true
 	}
-	return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds)
+	return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIDs)
 }
diff --git a/pkg/azuredisk/azure_controller_common_test.go b/pkg/azuredisk/azure_controller_common_test.go
index 9e75240c2d..357ae392ef 100644
--- a/pkg/azuredisk/azure_controller_common_test.go
+++ b/pkg/azuredisk/azure_controller_common_test.go
@@ -56,7 +56,7 @@ var (
 )
 
 func fakeUpdateAsync(statusCode int) func(context.Context, string, string, compute.VirtualMachineUpdate, string) (*azure.Future, *retry.Error) {
-	return func(ctx context.Context, resourceGroup, nodeName string, parameters compute.VirtualMachineUpdate, source string) (*azure.Future, *retry.Error) {
+	return func(_ context.Context, _, nodeName string, parameters compute.VirtualMachineUpdate, _ string) (*azure.Future, *retry.Error) {
 		vm := &compute.VirtualMachine{
 			Name: &nodeName,
 			Plan: parameters.Plan,
@@ -818,7 +818,7 @@ func TestIsInstanceNotFoundError(t *testing.T) {
 	}
 
 	for i, test := range testCases {
-		result := isInstanceNotFoundError(fmt.Errorf(test.errMsg))
+		result := isInstanceNotFoundError(fmt.Errorf("%v", test.errMsg))
 		assert.Equal(t, test.expectedResult, result, "TestCase[%d]", i, result)
 	}
 }
diff --git a/pkg/azuredisk/azure_managedDiskController.go b/pkg/azuredisk/azure_managedDiskController.go
index fcaa7bc161..16da9404c2 100644
--- a/pkg/azuredisk/azure_managedDiskController.go
+++ b/pkg/azuredisk/azure_managedDiskController.go
@@ -277,7 +277,7 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *
 	if options.SkipGetDiskOperation {
 		klog.Warningf("azureDisk - GetDisk(%s, StorageAccountType:%s) is throttled, unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType)
 	} else {
-		err = kwait.ExponentialBackoffWithContext(ctx, defaultBackOff, func(ctx context.Context) (bool, error) {
+		err = kwait.ExponentialBackoffWithContext(ctx, defaultBackOff, func(_ context.Context) (bool, error) {
 			provisionState, id, err := c.GetDisk(ctx, subsID, rg, options.DiskName)
 			if err == nil {
 				if id != "" {
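Reviewer note: the golangci-lint bump to v1.60 is what appears to drive most of this PR. The newer govet/printf rules reject non-constant format strings, which is why `fmt.Errorf(test.errMsg)` above becomes `fmt.Errorf("%v", test.errMsg)`, and the explicit `--timeout 10m` gives the stricter run headroom beyond golangci-lint's default of 1m. A standalone sketch of the failure mode the rule guards against (illustrative only, not driver code):

```go
package main

import "fmt"

func main() {
	msg := "disk quota at 100% capacity"
	bad := fmt.Errorf(msg)        // flagged: non-constant format string
	good := fmt.Errorf("%v", msg) // msg is treated as data, not as a format
	fmt.Println(bad)  // disk quota at 100%!c(MISSING)apacity
	fmt.Println(good) // disk quota at 100% capacity
}
```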
diff --git a/pkg/azuredisk/azure_managedDiskController_test.go b/pkg/azuredisk/azure_managedDiskController_test.go
index 1eeee8750f..d622bff906 100644
--- a/pkg/azuredisk/azure_managedDiskController_test.go
+++ b/pkg/azuredisk/azure_managedDiskController_test.go
@@ -348,7 +348,7 @@ func TestCreateManagedDiskWithExtendedLocation(t *testing.T) {
 	mockDisksClient := mock_diskclient.NewMockInterface(ctrl)
 	common.clientFactory.(*mock_azclient.MockClientFactory).EXPECT().GetDiskClientForSub(testCloud.SubscriptionID).Return(mockDisksClient, nil).AnyTimes()
 	mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, diskName, gomock.Any()).
-		Do(func(ctx interface{}, rg, dn string, disk armcompute.Disk) {
+		Do(func(_ interface{}, _, _ string, disk armcompute.Disk) {
 			assert.Equal(t, el.Name, disk.ExtendedLocation.Name, "The extended location name should match.")
 			assert.Equal(t, el.Type, disk.ExtendedLocation.Type, "The extended location type should match.")
 		}).Return(to.Ptr(diskreturned), nil)
diff --git a/pkg/azuredisk/azuredisk.go b/pkg/azuredisk/azuredisk.go
index 71c50333ff..7c7d0691c0 100644
--- a/pkg/azuredisk/azuredisk.go
+++ b/pkg/azuredisk/azuredisk.go
@@ -184,7 +184,7 @@ func newDriverV1(options *DriverOptions) *Driver {
 	}
 	topologyKey = fmt.Sprintf("topology.%s/zone", driver.Name)
 
-	getter := func(key string) (interface{}, error) { return nil, nil }
+	getter := func(_ string) (interface{}, error) { return nil, nil }
 	var err error
 	if driver.throttlingCache, err = azcache.NewTimedCache(5*time.Minute, getter, false); err != nil {
 		klog.Fatalf("%v", err)
diff --git a/pkg/azuredisk/azuredisk_option_test.go b/pkg/azuredisk/azuredisk_option_test.go
index 2320ef4da1..4439f23ed7 100644
--- a/pkg/azuredisk/azuredisk_option_test.go
+++ b/pkg/azuredisk/azuredisk_option_test.go
@@ -28,7 +28,7 @@ func TestDriverOptions_AddFlags(t *testing.T) {
 	got := o.AddFlags()
 	count := 0
 
-	got.VisitAll(func(f *flag.Flag) {
+	got.VisitAll(func(_ *flag.Flag) {
 		count++
 	})
 	if count != typeInfo.NumField() {
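The controllerserver.go hunks below make two closely related swaps: `status.Errorf(code, err.Error())` becomes `status.Errorf(code, "%v", err)` (the same non-constant-format-string fix as above), and existing `"...: %v", err.Error()` arguments drop the `.Error()` call. The second swap is behavior-preserving, since `%v` formats an error by calling its `Error()` method, and it is also nil-safe:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("attach throttled")
	fmt.Printf("attach failed: %v\n", err)         // attach failed: attach throttled
	fmt.Printf("attach failed: %v\n", err.Error()) // identical output

	err = nil
	fmt.Printf("attach failed: %v\n", err) // attach failed: <nil>
	// err.Error() would panic here with a nil pointer dereference
}
```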
diff --git a/pkg/azuredisk/controllerserver.go b/pkg/azuredisk/controllerserver.go
index 5823f62f58..e579ad9080 100644
--- a/pkg/azuredisk/controllerserver.go
+++ b/pkg/azuredisk/controllerserver.go
@@ -324,7 +324,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
 		if strings.Contains(err.Error(), consts.NotFound) {
 			return nil, status.Error(codes.NotFound, err.Error())
 		}
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	isOperationSucceeded = true
@@ -395,7 +395,7 @@ func (d *Driver) ControllerModifyVolume(ctx context.Context, req *csi.Controller
 	diskName, err := azureutils.GetDiskName(diskURI)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	if _, err := d.checkDiskExists(ctx, diskURI); err != nil {
@@ -440,7 +440,7 @@ func (d *Driver) ControllerModifyVolume(ctx context.Context, req *csi.Controll
 		if strings.Contains(err.Error(), consts.NotFound) {
 			return nil, status.Error(codes.NotFound, err.Error())
 		}
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	isOperationSucceeded = true
@@ -484,7 +484,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle
 	nodeName := types.NodeName(nodeID)
 	diskName, err := azureutils.GetDiskName(diskURI)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_publish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name)
@@ -525,7 +525,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle
 	}
 	var cachingMode armcompute.CachingTypes
 	if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	occupiedLuns := d.getOccupiedLunsFromNode(ctx, nodeName, diskURI)
@@ -559,7 +559,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle
 			}
 			if len(errMsg) > maxErrMsgLength {
 				errMsg = errMsg[:maxErrMsgLength]
 			}
-			return nil, status.Errorf(codes.Internal, errMsg)
+			return nil, status.Errorf(codes.Internal, "%v", errMsg)
 		}
 	}
 	klog.V(2).Infof("attach volume %s to node %s successfully", diskURI, nodeName)
@@ -591,7 +591,7 @@ func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.Control
 	diskName, err := azureutils.GetDiskName(diskURI)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_unpublish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name)
@@ -611,7 +611,7 @@ func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.Control
 			if len(errMsg) > maxErrMsgLength {
 				errMsg = errMsg[:maxErrMsgLength]
 			}
-			return nil, status.Errorf(codes.Internal, errMsg)
+			return nil, status.Errorf(codes.Internal, "%v", errMsg)
 		}
 	}
 	klog.V(2).Infof("detach volume %s from node %s successfully", diskURI, nodeID)
@@ -725,7 +725,7 @@ func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (
 func (d *Driver) listVolumesInCluster(ctx context.Context, start, maxEntries int) (*csi.ListVolumesResponse, error) {
 	pvList, err := d.cloud.KubeClient.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err.Error())
+		return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err)
 	}
 
 	// get all resource groups and put them into a sorted slice
@@ -834,7 +834,7 @@ func (d *Driver) listVolumesByResourceGroup(ctx context.Context, resourceGroup s
 	diskClient := d.clientFactory.GetDiskClient()
 	disks, derr := diskClient.List(ctx, resourceGroup)
 	if derr != nil {
-		return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr.Error())}
+		return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr)}
 	}
 	// if volSet is initialized but is empty, return
 	if volSet != nil && len(volSet) == 0 {
@@ -932,7 +932,7 @@ func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.Controller
 	}
 	result, rerr := diskClient.Get(ctx, resourceGroup, diskName)
 	if rerr != nil {
-		return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, rerr.Error())
+		return nil, status.Errorf(codes.Internal, "could not get the disk(%s) under rg(%s) with error(%v)", diskName, resourceGroup, rerr)
 	}
 	if result.Properties == nil || result.Properties.DiskSizeGB == nil {
 		return nil, status.Errorf(codes.Internal, "could not get size of the disk(%s)", diskName)
@@ -1031,7 +1031,7 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ
 	customTagsMap, err := volumehelper.ConvertTagsToMap(customTags, tagValueDelimiter)
 	if err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%v", err)
 	}
 	tags := make(map[string]*string)
 	for k, v := range customTagsMap {
@@ -1180,7 +1180,7 @@ func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequ
 	if azureutils.IsARMResourceID(snapshotID) {
 		snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 	}
 
@@ -1241,7 +1241,7 @@ func (d *Driver) getSnapshotByID(ctx context.Context, subsID, resourceGroup, sna
 	if azureutils.IsARMResourceID(snapshotID) {
 		snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 	}
 	snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID)
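One property these hunks preserve, in both the v1 driver above and the v2 driver below: the `consts.NotFound` check stays ahead of the generic `codes.Internal` fallback, because CSI sidecars decide whether to retry based on the gRPC code. A standalone sketch (`toStatusErr` is a hypothetical helper, not part of the driver):

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatusErr mirrors the pattern in CreateVolume: classify the error
// first, then fall back to Internal with the error passed as a value.
func toStatusErr(err error) error {
	if strings.Contains(err.Error(), "NotFound") {
		return status.Error(codes.NotFound, err.Error())
	}
	return status.Errorf(codes.Internal, "%v", err)
}

func main() {
	err := toStatusErr(errors.New("disk NotFound in resource group"))
	fmt.Println(status.Code(err)) // NotFound
}
```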
diff --git a/pkg/azuredisk/controllerserver_v2.go b/pkg/azuredisk/controllerserver_v2.go
index fc81942fd1..9e4f84bb7c 100644
--- a/pkg/azuredisk/controllerserver_v2.go
+++ b/pkg/azuredisk/controllerserver_v2.go
@@ -237,7 +237,7 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques
 		if strings.Contains(err.Error(), consts.NotFound) {
 			return nil, status.Error(codes.NotFound, err.Error())
 		}
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	isOperationSucceeded = true
@@ -312,7 +312,7 @@ func (d *DriverV2) ControllerModifyVolume(ctx context.Context, req *csi.Controll
 	diskName, err := azureutils.GetDiskName(diskURI)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	if _, err := d.checkDiskExists(ctx, diskURI); err != nil {
@@ -357,7 +357,7 @@ func (d *DriverV2) ControllerModifyVolume(ctx context.Context, req *csi.Controll
 		if strings.Contains(err.Error(), consts.NotFound) {
 			return nil, status.Error(codes.NotFound, err.Error())
 		}
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	isOperationSucceeded = true
@@ -401,7 +401,7 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control
 	nodeName := types.NodeName(nodeID)
 	diskName, err := azureutils.GetDiskName(diskURI)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_publish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name)
@@ -433,11 +433,11 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control
 		klog.V(2).Infof("Attach operation is successful. volume %s is already attached to node %s at lun %d.", diskURI, nodeName, lun)
 	} else {
 		if azureutils.IsThrottlingError(err) {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 		var cachingMode armcompute.CachingTypes
 		if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 
 		klog.V(2).Infof("Trying to attach volume %s to node %s", diskURI, nodeName)
@@ -487,7 +487,7 @@ func (d *DriverV2) ControllerUnpublishVolume(ctx context.Context, req *csi.Contr
 	diskName, err := azureutils.GetDiskName(diskURI)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, err.Error())
+		return nil, status.Errorf(codes.Internal, "%v", err)
 	}
 
 	mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_unpublish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name)
@@ -579,7 +579,7 @@ func (d *DriverV2) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest)
 func (d *DriverV2) listVolumesInCluster(ctx context.Context, start, maxEntries int) (*csi.ListVolumesResponse, error) {
 	pvList, err := d.cloud.KubeClient.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err.Error())
+		return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err)
 	}
 
 	// get all resource groups and put them into a sorted slice
@@ -688,7 +688,7 @@ func (d *DriverV2) listVolumesByResourceGroup(ctx context.Context, resourceGroup
 	diskClient := d.clientFactory.GetDiskClient()
 	disks, derr := diskClient.List(ctx, resourceGroup)
 	if derr != nil {
-		return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %s", resourceGroup, derr.Error())}
+		return listVolumeStatus{err: status.Errorf(codes.Internal, "ListVolumes on rg(%s) failed with error: %v", resourceGroup, derr)}
 	}
 	// if volSet is initialized but is empty, return
 	if volSet != nil && len(volSet) == 0 {
@@ -874,7 +874,7 @@ func (d *DriverV2) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRe
 	customTagsMap, err := volumehelper.ConvertTagsToMap(customTags, tagValueDelimiter)
 	if err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%v", err)
 	}
 	tags := make(map[string]*string)
 	for k, v := range customTagsMap {
@@ -950,7 +950,7 @@ func (d *DriverV2) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRe
 	if azureutils.IsARMResourceID(snapshotID) {
 		snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 	}
 
@@ -1000,7 +1000,7 @@ func (d *DriverV2) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequ
 	// no SnapshotId is set, return all snapshots that satisfy the request.
 	snapshots, err := snapshotClient.List(ctx, d.cloud.ResourceGroup)
 	if err != nil {
-		return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshot error: %v", err.Error()))
+		return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshot error: %v", err))
 	}
 
 	return azureutils.GetEntriesAndNextToken(req, snapshots)
@@ -1012,7 +1012,7 @@ func (d *DriverV2) getSnapshotByID(ctx context.Context, subsID, resourceGroup, s
 	if azureutils.IsARMResourceID(snapshotID) {
 		snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 	}
 	snapshotClient, err := d.clientFactory.GetSnapshotClientForSub(subsID)
@@ -1021,7 +1021,7 @@ func (d *DriverV2) getSnapshotByID(ctx context.Context, subsID, resourceGroup, s
 	}
 	snapshot, rerr := snapshotClient.Get(ctx, resourceGroup, snapshotName)
 	if rerr != nil {
-		return nil, status.Error(codes.Internal, fmt.Sprintf("get snapshot %s from rg(%s) error: %v", snapshotName, resourceGroup, rerr.Error()))
+		return nil, status.Error(codes.Internal, fmt.Sprintf("get snapshot %s from rg(%s) error: %v", snapshotName, resourceGroup, rerr))
 	}
 
 	return azureutils.GenerateCSISnapshot(sourceVolumeID, snapshot)
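The fake-driver hunk below gives the cache getter the same blank-identifier treatment as the earlier literals. In Go, `_` as a parameter name tells both the reader and the unused-parameter linter that the argument is ignored deliberately. An illustrative sketch (the names here are mine, not the driver's):

```go
package main

import "fmt"

// getter mimics the shape of the azcache getter in the hunk below.
type getter func(key string) (interface{}, error)

func lookup(g getter, key string) {
	v, err := g(key)
	fmt.Println(v, err)
}

func main() {
	// A literal's parameter names need not match the type's;
	// "_" documents that this no-op getter never reads the key.
	noop := func(_ string) (interface{}, error) { return nil, nil }
	lookup(noop, "vol_1")
}
```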
diff --git a/pkg/azuredisk/fake_azuredisk.go b/pkg/azuredisk/fake_azuredisk.go
index a88cde2b3e..170fee5432 100644
--- a/pkg/azuredisk/fake_azuredisk.go
+++ b/pkg/azuredisk/fake_azuredisk.go
@@ -132,7 +132,7 @@ func newFakeDriverV1(ctrl *gomock.Controller) (*fakeDriverV1, error) {
 	driver.mounter = mounter
 
-	cache, err := azcache.NewTimedCache(time.Minute, func(key string) (interface{}, error) {
+	cache, err := azcache.NewTimedCache(time.Minute, func(_ string) (interface{}, error) {
 		return nil, nil
 	}, false)
 	if err != nil {
diff --git a/pkg/azuredisk/nodeserver.go b/pkg/azuredisk/nodeserver.go
index 09c10a66fc..85f32730bb 100644
--- a/pkg/azuredisk/nodeserver.go
+++ b/pkg/azuredisk/nodeserver.go
@@ -263,7 +263,7 @@ func (d *Driver) NodePublishVolume(_ context.Context, req *csi.NodePublishVolume
 		}
 		klog.V(2).Infof("NodePublishVolume [block]: found device path %s with lun %s", source, lun)
 		if err = d.ensureBlockTargetFile(target); err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 	case *csi.VolumeCapability_Mount:
 		mnt, err := d.ensureMountPoint(target)
@@ -497,7 +497,7 @@ func (d *Driver) NodeExpandVolume(_ context.Context, req *csi.NodeExpandVolumeRe
 
 	devicePath, err := getDevicePathWithMountPath(volumePath, d.mounter)
 	if err != nil {
-		return nil, status.Errorf(codes.NotFound, err.Error())
+		return nil, status.Errorf(codes.NotFound, "%v", err)
 	}
 
 	if d.enableDiskOnlineResize {
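In the node-server test tables below, even the function-typed struct fields (`setupFunc`, `validateFunc`) switch to `_` parameter names. That part is purely cosmetic: parameter names in a function type are optional documentation and do not change the type's identity, as this standalone snippet shows:

```go
package main

import "fmt"

type testCase struct {
	setupNamed func(name string, count int)
	setupBlank func(_ string, _ int)
}

func main() {
	var tc testCase
	tc.setupNamed = func(_ string, n int) { fmt.Println(n) } // a literal may rename freely
	tc.setupBlank = tc.setupNamed                            // same type, so assignable
	tc.setupBlank("ignored", 42)                             // prints 42
}
```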
diff --git a/pkg/azuredisk/nodeserver_test.go b/pkg/azuredisk/nodeserver_test.go
index 77f178a3ae..36b6481368 100644
--- a/pkg/azuredisk/nodeserver_test.go
+++ b/pkg/azuredisk/nodeserver_test.go
@@ -209,7 +209,7 @@ func TestEnsureMountPoint(t *testing.T) {
 		if !(runtime.GOOS == "windows" && test.skipOnWindows) && !(runtime.GOOS == "darwin" && test.skipOnDarwin) {
 			mnt, err := d.ensureMountPoint(test.target)
 			if !testutil.AssertError(&test.expectedErr, err) {
-				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 			}
 			if err == nil {
 				assert.Equal(t, test.expectedMnt, mnt)
@@ -235,8 +235,8 @@ func TestNodeGetInfo(t *testing.T) {
 		desc         string
 		expectedErr  error
 		skipOnDarwin bool
-		setupFunc    func(t *testing.T, d FakeDriver)
-		validateFunc func(t *testing.T, resp *csi.NodeGetInfoResponse)
+		setupFunc    func(_ *testing.T, _ FakeDriver)
+		validateFunc func(_ *testing.T, _ *csi.NodeGetInfoResponse)
 	}{
 		{
 			desc: "[Success] Get node information for existing VM",
@@ -271,7 +271,7 @@
 		{
 			desc:        "[Failure] Get node information for non-existing VM",
 			expectedErr: status.Error(codes.Internal, fmt.Sprintf("getNodeInfoFromLabels on node(%s) failed with %s", "fakeNodeID", "kubeClient is nil")),
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.getCloud().VirtualMachinesClient.(*mockvmclient.MockInterface).EXPECT().
 					Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
 					Return(compute.VirtualMachine{}, notFoundErr).
@@ -340,7 +340,7 @@ func TestNodeGetVolumeStats(t *testing.T) {
 		},
 		{
 			desc: "Block volume path success",
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.getHostUtil().(*azureutils.FakeHostUtil).SetPathIsDeviceResult(blockVolumePath, true, nil)
 				d.setNextCommandOutputScripts(blockdevAction)
 			},
@@ -358,7 +358,7 @@
 		},
 		{
 			desc: "failed to determine block device",
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.getHostUtil().(*azureutils.FakeHostUtil).SetPathIsDeviceResult(fakePath, true, fmt.Errorf("host util is not device path"))
 			},
 			req: csi.NodeGetVolumeStatsRequest{VolumePath: fakePath, VolumeId: "vol_1"},
@@ -481,13 +481,13 @@ func TestNodeStageVolume(t *testing.T) {
 		},
 		{
 			desc: "Volume operation in progress",
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.getVolumeLocks().TryAcquire("vol_1")
 			},
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
 				VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap, AccessType: stdVolCapBlock}},
 			expectedErr: status.Error(codes.Aborted, fmt.Sprintf(volumeOperationAlreadyExistsFmt, "vol_1")),
-			cleanupFunc: func(t *testing.T, d FakeDriver) {
+			cleanupFunc: func(_ *testing.T, d FakeDriver) {
 				d.getVolumeLocks().Release("vol_1")
 			},
 		},
@@ -512,7 +512,7 @@
 			desc:          "Successfully staged",
 			skipOnDarwin:  true,
 			skipOnWindows: true,
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blkidAction, blockSizeAction, blkidAction)
 			},
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
@@ -527,7 +527,7 @@
 			desc:          "Successfully with resize",
 			skipOnDarwin:  true,
 			skipOnWindows: true,
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setNextCommandOutputScripts(blkidAction, fsckAction, blkidAction, resize2fsAction)
 			},
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
@@ -542,7 +542,7 @@
 			desc:          "failed to get perf attributes",
 			skipOnDarwin:  true,
 			skipOnWindows: true,
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(true)
 				d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blockSizeAction)
 			},
@@ -552,7 +552,7 @@
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
 				PublishContext: publishContext,
 				VolumeContext:  volumeContextWithPerfProfileField,
 			},
-			cleanupFunc: func(t *testing.T, d FakeDriver) {
+			cleanupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(false)
 			},
 			expectedErr: status.Errorf(codes.Internal, "failed to get perf attributes for /dev/sdd. Error: %v", fmt.Errorf("Perf profile wrong is invalid")),
@@ -561,7 +561,7 @@
 			desc:          "Successfully staged with performance optimizations",
 			skipOnDarwin:  true,
 			skipOnWindows: true,
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(true)
 				mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface)
 				diskSupportsPerfOptimizationCall := mockoptimization.EXPECT().
@@ -580,7 +580,7 @@
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
 				PublishContext: publishContext,
 				VolumeContext:  volumeContext,
 			},
-			cleanupFunc: func(t *testing.T, d FakeDriver) {
+			cleanupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(false)
 			},
 			expectedErr: nil,
@@ -589,7 +589,7 @@
 			desc:          "failed to optimize device performance",
 			skipOnDarwin:  true,
 			skipOnWindows: true,
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(true)
 				mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface)
 				diskSupportsPerfOptimizationCall := mockoptimization.EXPECT().
@@ -608,7 +608,7 @@
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
 				PublishContext: publishContext,
 				VolumeContext:  volumeContext,
 			},
-			cleanupFunc: func(t *testing.T, d FakeDriver) {
+			cleanupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(false)
 			},
 			expectedErr: status.Errorf(codes.Internal, "failed to optimize device performance for target(/dev/sdd) error(%s)", fmt.Errorf("failed to optimize device performance")),
@@ -617,7 +617,7 @@
 			desc:          "Successfully staged with perf optimization is disabled",
 			skipOnDarwin:  true,
 			skipOnWindows: true,
-			setupFunc: func(t *testing.T, d FakeDriver) {
+			setupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(true)
 				mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface)
 				mockoptimization.EXPECT().
@@ -632,7 +632,7 @@ func TestNodeStageVolume(t *testing.T) {
 			req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
 				PublishContext: publishContext,
 				VolumeContext:  volumeContext,
 			},
-			cleanupFunc: func(t *testing.T, d FakeDriver) {
+			cleanupFunc: func(_ *testing.T, d FakeDriver) {
 				d.setPerfOptimizationEnabled(false)
 			},
 			expectedErr: nil,
@@ -746,7 +746,7 @@ func TestNodeUnstageVolume(t *testing.T) {
 			!(runtime.GOOS == "darwin" && test.skipOnDarwin) {
 			_, err := d.NodeUnstageVolume(context.Background(), &test.req)
 			if !testutil.AssertError(&test.expectedErr, err) {
-				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 			}
 		}
 		if test.cleanup != nil {
@@ -860,8 +860,8 @@ func TestNodePublishVolume(t *testing.T) {
 			skipOnWindows: true, // permission issues
 			skipOnDarwin:  true,
 			expectedErr: testutil.TestError{
-				DefaultError: status.Errorf(codes.Internal, fmt.Sprintf("could not mount target \"%s\": "+
-					"mkdir %s: not a directory", azuredisk, azuredisk)),
+				DefaultError: status.Errorf(codes.Internal, "could not mount target \"%s\": "+
+					"mkdir %s: not a directory", azuredisk, azuredisk),
 			},
 		},
 		{
@@ -896,8 +896,8 @@
 				Readonly: true},
 			skipOnWindows: true, // permission issues
 			expectedErr: testutil.TestError{
-				DefaultError: status.Errorf(codes.Internal, fmt.Sprintf("could not mount \"%s\" at \"%s\": "+
-					"fake Mount: source error", errorMountSource, targetTest)),
+				DefaultError: status.Errorf(codes.Internal, "could not mount \"%s\" at \"%s\": "+
+					"fake Mount: source error", errorMountSource, targetTest),
 			},
 		},
 		{
@@ -937,7 +937,7 @@
 			var err error
 			_, err = d.NodePublishVolume(context.Background(), &test.req)
 			if !testutil.AssertError(&test.expectedErr, err) {
-				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 			}
 		}
 		if test.cleanup != nil {
@@ -1013,7 +1013,7 @@ func TestNodeUnpublishVolume(t *testing.T) {
 			!(test.skipOnDarwin && runtime.GOOS == "darwin") {
 			_, err := d.NodeUnpublishVolume(context.Background(), &test.req)
 			if !testutil.AssertError(&test.expectedErr, err) {
-				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+				t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 			}
 		}
 		if test.cleanup != nil {
@@ -1220,7 +1220,7 @@ func TestNodeExpandVolume(t *testing.T) {
 
 		_, err := d.NodeExpandVolume(context.Background(), &test.req)
 		if !testutil.AssertError(&test.expectedErr, err) {
-			t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+			t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 		}
 	}
 	err = os.RemoveAll(targetTest)
@@ -1274,7 +1274,7 @@ func TestGetBlockSizeBytes(t *testing.T) {
 	for _, test := range tests {
 		_, err := getBlockSizeBytes(test.req, d.getMounter())
 		if !testutil.AssertError(&test.expectedErr, err) {
-			t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+			t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 		}
 	}
 	//Setup
@@ -1321,7 +1321,7 @@ func TestEnsureBlockTargetFile(t *testing.T) {
 	for _, test := range tests {
 		err := d.ensureBlockTargetFile(test.req)
 		if !testutil.AssertError(&test.expectedErr, err) {
-			t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
+			t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr)
 		}
 	}
 	err = os.RemoveAll(testTarget)
diff --git a/pkg/azuredisk/nodeserver_v2.go b/pkg/azuredisk/nodeserver_v2.go
index d21f205247..bea93bbda6 100644
--- a/pkg/azuredisk/nodeserver_v2.go
+++ b/pkg/azuredisk/nodeserver_v2.go
@@ -255,7 +255,7 @@ func (d *DriverV2) NodePublishVolume(ctx context.Context, req *csi.NodePublishVo
 		}
 		klog.V(2).Infof("NodePublishVolume [block]: found device path %s with lun %s", source, lun)
 		if err = d.ensureBlockTargetFile(target); err != nil {
-			return nil, status.Errorf(codes.Internal, err.Error())
+			return nil, status.Errorf(codes.Internal, "%v", err)
 		}
 	case *csi.VolumeCapability_Mount:
 		mnt, err := d.ensureMountPoint(target)
@@ -455,7 +455,7 @@ func (d *DriverV2) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolu
 
 	devicePath, err := getDevicePathWithMountPath(volumePath, d.mounter)
 	if err != nil {
-		return nil, status.Errorf(codes.NotFound, err.Error())
+		return nil, status.Errorf(codes.NotFound, "%v", err)
 	}
 
 	if d.enableDiskOnlineResize {
diff --git a/pkg/csi-common/utils.go b/pkg/csi-common/utils.go
index 42fa0cf3fc..5f707f3437 100644
--- a/pkg/csi-common/utils.go
+++ b/pkg/csi-common/utils.go
@@ -44,7 +44,7 @@ func ParseEndpoint(ep string) (string, string, error) {
 func Listen(ctx context.Context, endpoint string) (net.Listener, error) {
 	proto, addr, err := ParseEndpoint(endpoint)
 	if err != nil {
-		klog.Errorf(err.Error())
+		klog.Errorf("%v", err)
 		return nil, err
 	}
 
@@ -53,7 +53,7 @@ func Listen(ctx context.Context, endpoint string) (net.Listener, error) {
 			addr = "/" + addr
 		}
 		if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
-			klog.Errorf("Failed to remove %s, error: %s", addr, err.Error())
+			klog.Errorf("Failed to remove %s, error: %v", addr, err)
 			return nil, err
 		}
 	}
diff --git a/pkg/csi-common/utils_test.go b/pkg/csi-common/utils_test.go
index 1feca4d275..a7376159a7 100644
--- a/pkg/csi-common/utils_test.go
+++ b/pkg/csi-common/utils_test.go
@@ -101,7 +101,7 @@ func TestLogGRPC(t *testing.T) {
 	buf := new(bytes.Buffer)
 	klog.SetOutput(buf)
 
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) { return nil, nil }
+	handler := func(_ context.Context, _ interface{}) (interface{}, error) { return nil, nil }
 	info := grpc.UnaryServerInfo{
 		FullMethod: "fake",
 	}
diff --git a/pkg/os/volume/volume.go b/pkg/os/volume/volume.go
index fa9c8249cf..d05a378597 100644
--- a/pkg/os/volume/volume.go
+++ b/pkg/os/volume/volume.go
@@ -71,8 +71,8 @@ func ListVolumesOnDisk(diskNumber uint32, partitionNumber uint32) (volumeIDs []s
 		return []string{}, fmt.Errorf("error list volumes on disk. cmd: %s, output: %s, error: %v", cmd, string(out), err)
 	}
 
-	volumeIds := strings.Split(strings.TrimSpace(string(out)), "\r\n")
-	return volumeIds, nil
+	volumeIDs = strings.Split(strings.TrimSpace(string(out)), "\r\n")
+	return volumeIDs, nil
 }
 
 // FormatVolume - Formats a volume with the NTFS format.
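The volume.go hunk above pairs a naming fix (`volumeIds` to `volumeIDs`, per the initialism convention) with a small cleanup: `ListVolumesOnDisk` already declares `volumeIDs` as a named return, so the body can assign to it instead of introducing a second, differently cased local. Reduced to its essentials:

```go
package main

import (
	"fmt"
	"strings"
)

// splitIDs condenses the tail of ListVolumesOnDisk: assign to the
// named return rather than declaring a near-duplicate local variable.
func splitIDs(out string) (volumeIDs []string, err error) {
	volumeIDs = strings.Split(strings.TrimSpace(out), "\r\n")
	return volumeIDs, nil
}

func main() {
	ids, _ := splitIDs("vol-1\r\nvol-2\r\n")
	fmt.Println(ids) // [vol-1 vol-2]
}
```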
diff --git a/test/e2e/pre_provisioning_test.go b/test/e2e/pre_provisioning_test.go
index 5499efe2d6..5d1eb544f3 100644
--- a/test/e2e/pre_provisioning_test.go
+++ b/test/e2e/pre_provisioning_test.go
@@ -47,7 +47,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
 		skipVolumeDeletion bool
 	)
 
-	ginkgo.BeforeEach(func(ctx ginkgo.SpecContext) {
+	ginkgo.BeforeEach(func(_ ginkgo.SpecContext) {
 		cs = f.ClientSet
 		ns = f.Namespace
 		testDriver = driver.InitAzureDiskDriver()
diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go
index 594d32ad6e..bcee7eca31 100644
--- a/test/e2e/suite_test.go
+++ b/test/e2e/suite_test.go
@@ -164,7 +164,7 @@ var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) {
 	}
 })
 
-var _ = ginkgo.AfterSuite(func(ctx ginkgo.SpecContext) {
+var _ = ginkgo.AfterSuite(func(_ ginkgo.SpecContext) {
 	// Default storage driver configuration is CSI. Freshly built
 	// CSI driver is installed for that case.
 	if isTestingMigration || isUsingInTreeVolumePlugin {
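A closing note on the e2e hooks: in ginkgo v2, node bodies that accept a `ginkgo.SpecContext` are registered as interruptible, and blanking the parameter keeps that registration while satisfying the unused-parameter check. A minimal sketch (assumes ginkgo v2, which the suite already uses):

```go
package e2e

import "github.com/onsi/ginkgo/v2"

// The blank SpecContext keeps the interruptible signature even though
// this particular body never consults the context.
var _ = ginkgo.BeforeEach(func(_ ginkgo.SpecContext) {
	// setup that needs no deadline or cancellation handling
})
```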