From b0ef93f50b64d334433024c4dc28642882bdefc3 Mon Sep 17 00:00:00 2001
From: umagnus
Date: Wed, 10 Aug 2022 03:04:27 +0000
Subject: [PATCH 1/2] add nodeserver.go unit test

add skipOnDarwin in TestNodePublishVolume and TestEnsureMountPoint
---
 pkg/azuredisk/nodeserver_test.go | 122 ++++++++++++++++++++++++++++++-
 1 file changed, 120 insertions(+), 2 deletions(-)

diff --git a/pkg/azuredisk/nodeserver_test.go b/pkg/azuredisk/nodeserver_test.go
index dbe58c3071..73cee47ae6 100644
--- a/pkg/azuredisk/nodeserver_test.go
+++ b/pkg/azuredisk/nodeserver_test.go
@@ -159,6 +159,7 @@ func TestEnsureMountPoint(t *testing.T) {
         desc          string
         target        string
         skipOnWindows bool
+        skipOnDarwin  bool
         expectedMnt   bool
         expectedErr   testutil.TestError
     }{
@@ -175,6 +176,7 @@ func TestEnsureMountPoint(t *testing.T) {
             desc:          "[Error] Not a directory",
             target:        azuredisk,
             skipOnWindows: true, // no error reported in windows
+            skipOnDarwin:  true,
             expectedErr: testutil.TestError{
                 DefaultError: &os.PathError{Op: "mkdir", Path: azuredisk, Err: syscall.ENOTDIR},
             },
@@ -202,7 +204,7 @@ func TestEnsureMountPoint(t *testing.T) {
     d.setMounter(fakeMounter)
 
     for _, test := range tests {
-        if !(runtime.GOOS == "windows" && test.skipOnWindows) {
+        if !(runtime.GOOS == "windows" && test.skipOnWindows) && !(runtime.GOOS == "darwin" && test.skipOnDarwin) {
             mnt, err := d.ensureMountPoint(test.target)
             if !testutil.AssertError(&test.expectedErr, err) {
                 t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error())
@@ -349,6 +351,16 @@ func TestNodeGetVolumeStats(t *testing.T) {
             skipOnWindows: true,
             expectedErr:   nil,
         },
+        {
+            desc: "failed to determine block device",
+            setupFunc: func(t *testing.T, d FakeDriver) {
+                d.getHostUtil().(*azureutils.FakeHostUtil).SetPathIsDeviceResult(fakePath, true, fmt.Errorf("host util is not device path"))
+            },
+            req:           csi.NodeGetVolumeStatsRequest{VolumePath: fakePath, VolumeId: "vol_1"},
+            skipOnDarwin:  true,
+            skipOnWindows: true,
+            expectedErr:   status.Errorf(codes.NotFound, "failed to determine whether %s is block device: %v", fakePath, fmt.Errorf("host util is not device path")),
+        },
     }
 
     // Setup
@@ -392,6 +404,12 @@ func TestNodeStageVolume(t *testing.T) {
     volumeContextWithResize := map[string]string{
         consts.ResizeRequired: "true",
     }
+    volumeContextWithMaxShare := map[string]string{
+        consts.MaxSharesField: "0.1",
+    }
+    volumeContextWithPerfProfileField := map[string]string{
+        consts.PerfProfileField: "wrong",
+    }
     stdVolCapBlock := &csi.VolumeCapability_Block{
         Block: &csi.VolumeCapability_BlockVolume{},
     }
@@ -448,6 +466,12 @@ func TestNodeStageVolume(t *testing.T) {
             req:         csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCapWrong}},
             expectedErr: status.Error(codes.InvalidArgument, "Volume capability not supported"),
         },
+        {
+            desc: "MaxShares value not supported",
+            req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap,
+                AccessType: stdVolCapBlock}, VolumeContext: volumeContextWithMaxShare},
+            expectedErr: status.Error(codes.InvalidArgument, "MaxShares value not supported"),
+        },
         {
             desc: "Volume operation in progress",
             setupFunc: func(t *testing.T, d FakeDriver) {
@@ -507,6 +531,25 @@ func TestNodeStageVolume(t *testing.T) {
             },
             expectedErr: nil,
         },
+        {
+            desc:          "failed to get perf attributes",
+            skipOnDarwin:  true,
+            skipOnWindows: true,
+            setupFunc: func(t *testing.T, d FakeDriver) {
+                d.setPerfOptimizationEnabled(true)
+                d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blockSizeAction)
+            },
+            req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
+                VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap,
+                    AccessType: stdVolCap},
+                PublishContext: publishContext,
+                VolumeContext:  volumeContextWithPerfProfileField,
+            },
+            cleanupFunc: func(t *testing.T, d FakeDriver) {
+                d.setPerfOptimizationEnabled(false)
+            },
+            expectedErr: status.Errorf(codes.Internal, "failed to get perf attributes for /dev/sdd. Error: %v", fmt.Errorf("Perf profile wrong is invalid")),
+        },
         {
             desc:          "Successfully staged with performance optimizations",
             skipOnDarwin:  true,
@@ -535,6 +578,58 @@ func TestNodeStageVolume(t *testing.T) {
             },
             expectedErr: nil,
         },
+        {
+            desc:          "failed to optimize device performance",
+            skipOnDarwin:  true,
+            skipOnWindows: true,
+            setupFunc: func(t *testing.T, d FakeDriver) {
+                d.setPerfOptimizationEnabled(true)
+                mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface)
+                diskSupportsPerfOptimizationCall := mockoptimization.EXPECT().
+                    DiskSupportsPerfOptimization(gomock.Any(), gomock.Any()).
+                    Return(true)
+                mockoptimization.EXPECT().
+                    OptimizeDiskPerformance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
+                    Return(fmt.Errorf("failed to optimize device performance")).
+                    After(diskSupportsPerfOptimizationCall)
+
+                d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blockSizeAction)
+            },
+            req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
+                VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap,
+                    AccessType: stdVolCap},
+                PublishContext: publishContext,
+                VolumeContext:  volumeContext,
+            },
+            cleanupFunc: func(t *testing.T, d FakeDriver) {
+                d.setPerfOptimizationEnabled(false)
+            },
+            expectedErr: status.Errorf(codes.Internal, "failed to optimize device performance for target(/dev/sdd) error(%s)", fmt.Errorf("failed to optimize device performance")),
+        },
+        {
+            desc:          "Successfully staged with perf optimization is disabled",
+            skipOnDarwin:  true,
+            skipOnWindows: true,
+            setupFunc: func(t *testing.T, d FakeDriver) {
+                d.setPerfOptimizationEnabled(true)
+                mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface)
+                mockoptimization.EXPECT().
+                    DiskSupportsPerfOptimization(gomock.Any(), gomock.Any()).
+                    Return(false)
+
+                d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blockSizeAction)
+            },
+            req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest,
+                VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap,
+                    AccessType: stdVolCap},
+                PublishContext: publishContext,
+                VolumeContext:  volumeContext,
+            },
+            cleanupFunc: func(t *testing.T, d FakeDriver) {
+                d.setPerfOptimizationEnabled(false)
+            },
+            expectedErr: nil,
+        },
     }
 
     // Setup
@@ -659,6 +754,10 @@ func TestNodePublishVolume(t *testing.T) {
     d, _ := NewFakeDriver(t)
     volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER}
+    volumeCapWrong := csi.VolumeCapability_AccessMode{Mode: 10}
+    volumeContextWithMaxShare := map[string]string{
+        consts.MaxSharesField: "0.1",
+    }
     publishContext := map[string]string{
         consts.LUN: "/dev/01",
     }
@@ -688,6 +787,7 @@ func TestNodePublishVolume(t *testing.T) {
         setup         func()
         req           csi.NodePublishVolumeRequest
         skipOnWindows bool
+        skipOnDarwin  bool
         expectedErr   testutil.TestError
         cleanup       func()
     }{
@@ -705,6 +805,23 @@ func TestNodePublishVolume(t *testing.T) {
                 DefaultError: status.Error(codes.InvalidArgument, "Volume ID missing in the request"),
             },
         },
+        {
+            desc: "MaxShares value not supported",
+            req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap, AccessType: stdVolCapBlock},
+                VolumeId:      "vol_1",
+                VolumeContext: volumeContextWithMaxShare},
+            expectedErr: testutil.TestError{
+                DefaultError: status.Error(codes.InvalidArgument, "MaxShares value not supported"),
+            },
+        },
+        {
+            desc: "Volume capability not supported",
+            req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCapWrong, AccessType: stdVolCapBlock},
+                VolumeId: "vol_1"},
+            expectedErr: testutil.TestError{
+                DefaultError: status.Error(codes.InvalidArgument, "Volume capability not supported"),
+            },
+        },
         {
             desc: "Staging target path missing",
             req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap, AccessType: stdVolCapBlock},
@@ -730,6 +847,7 @@ func TestNodePublishVolume(t *testing.T) {
                 StagingTargetPath: sourceTest,
                 Readonly:          true},
             skipOnWindows: true, // permission issues
+            skipOnDarwin:  true,
             expectedErr: testutil.TestError{
                 DefaultError: status.Errorf(codes.Internal, fmt.Sprintf("could not mount target \"%s\": "+
                     "mkdir %s: not a directory", azuredisk, azuredisk)),
             },
@@ -804,7 +922,7 @@ func TestNodePublishVolume(t *testing.T) {
         if test.setup != nil {
             test.setup()
         }
-        if !(test.skipOnWindows && runtime.GOOS == "windows") {
+        if !(test.skipOnWindows && runtime.GOOS == "windows") && !(test.skipOnDarwin && runtime.GOOS == "darwin") {
             var err error
             _, err = d.NodePublishVolume(context.Background(), &test.req)
             if !testutil.AssertError(&test.expectedErr, err) {

From b241ee1c51079a98cfcc5c92e399d341ce7466ce Mon Sep 17 00:00:00 2001
From: umagnus
Date: Wed, 10 Aug 2022 07:56:04 +0000
Subject: [PATCH 2/2] fix TestLogGRPC ut failure

fix TestLogGRPC ut failure
---
 pkg/csi-common/server_test.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pkg/csi-common/server_test.go b/pkg/csi-common/server_test.go
index 636bc7eef0..b5f6ac1c42 100644
--- a/pkg/csi-common/server_test.go
+++ b/pkg/csi-common/server_test.go
@@ -19,6 +19,7 @@ package csicommon
 import (
     "sync"
     "testing"
+    "time"
     "github.com/stretchr/testify/assert"
     "google.golang.org/grpc"
 )
@@ -31,7 +32,10 @@ func TestNewNonBlockingGRPCServer(t *testing.T) {
 
 func TestStart(t *testing.T) {
     s := NewNonBlockingGRPCServer()
+    // sleep a while to avoid race condition in unit test
+    time.Sleep(time.Millisecond * 2000)
     s.Start("tcp://127.0.0.1:0", nil, nil, nil, true)
+    time.Sleep(time.Millisecond * 2000)
 }
 
 func TestServe(t *testing.T) {
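
Note (not part of either patch): the second patch stabilizes the flaky test by sleeping for a fixed two seconds before and after s.Start in TestStart. If the flake resurfaces, a bounded polling helper is usually more robust than a fixed delay, because the test resumes as soon as the awaited state is reached and fails with a clear message when it never is. The sketch below is only an illustration: plain Go, standard library only; waitFor and the example condition are hypothetical names, not part of the csi-common package.

package main

import (
    "errors"
    "fmt"
    "time"
)

// waitFor polls cond every interval until it returns true or the timeout
// elapses. Unlike a fixed sleep, the caller resumes as soon as the condition
// holds and gets an explicit error when it never does.
func waitFor(cond func() bool, timeout, interval time.Duration) error {
    deadline := time.Now().Add(timeout)
    for {
        if cond() {
            return nil
        }
        if time.Now().After(deadline) {
            return errors.New("condition not met before timeout")
        }
        time.Sleep(interval)
    }
}

func main() {
    start := time.Now()
    // Stand-in condition: in the unit test this would be the observable state
    // the fixed 2s sleeps are waiting for (for example, a previously started
    // test server having released its listener).
    err := waitFor(func() bool { return time.Since(start) > 100*time.Millisecond }, 5*time.Second, 10*time.Millisecond)
    fmt.Println("waited:", time.Since(start), "err:", err)
}

With such a helper, TestStart could wait on the state it actually depends on instead of always paying the worst-case two-second delay on every run.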