From 3687d4ad98a2156e74548bb5ca3a0c297be1a9c1 Mon Sep 17 00:00:00 2001
From: CMGS
Date: Wed, 2 Nov 2022 17:57:54 +0800
Subject: [PATCH] fix tests

---
 cluster/calcium/calcium_test.go         |   2 +-
 cluster/calcium/create_test.go          |   2 +-
 cluster/calcium/lock_test.go            |  10 +-
 cluster/calcium/realloc_test.go         |   9 +-
 cluster/calcium/remap_test.go           |   2 +-
 cluster/calcium/remove_test.go          |   2 +-
 cluster/calcium/wal_test.go             |  22 ++--
 engine/docker/helper_test.go            |   2 +-
 go.mod                                  |   2 +-
 lock/etcdlock/mutex_test.go             |  24 ++--
 resources/volume/models/alloc.go        |   5 +-
 resources/volume/models/alloc_test.go   |   4 +-
 resources/volume/models/info_test.go    |   2 +-
 resources/volume/models/realloc.go      |   5 +-
 resources/volume/models/realloc_test.go |   4 +-
 resources/volume/schedule/schedule.go   |  25 ++---
 resources/volume/types/errors.go        |   9 +-
 rpc/counter_test.go                     |   2 +-
 source/common/common_test.go            |   6 +-
 store/etcdv3/meta/etcd_test.go          |  52 +++++-----
 store/etcdv3/node_test.go               |  26 ++---
 store/redis/node_test.go                |  14 +--
 strategy/average_test.go                |  20 ++--
 strategy/communism_test.go              | 126 ++++++++++++------------
 strategy/fill_test.go                   |  18 ++--
 strategy/global_test.go                 |  18 ++--
 strategy/strategy_test.go               |   2 +-
 utils/utils_test.go                     |  10 +-
 wal/hydro_test.go                       |  12 +--
 wal/wal_test.go                         |   2 +-
 30 files changed, 221 insertions(+), 218 deletions(-)

diff --git a/cluster/calcium/calcium_test.go b/cluster/calcium/calcium_test.go
index 46d5f126f..b01e211c6 100644
--- a/cluster/calcium/calcium_test.go
+++ b/cluster/calcium/calcium_test.go
@@ -56,7 +56,7 @@ func NewTestCluster() *Calcium {
 }
 
 func TestNewCluster(t *testing.T) {
-	ctx := context.TODO()
+	ctx := context.Background()
 	config := types.Config{WALFile: "/tmp/a", HAKeepaliveInterval: 16 * time.Second}
 	_, err := New(ctx, config, nil)
 	assert.Error(t, err)
diff --git a/cluster/calcium/create_test.go b/cluster/calcium/create_test.go
index d9744e781..f9aa62997 100644
--- a/cluster/calcium/create_test.go
+++ b/cluster/calcium/create_test.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"testing"
 
-	"github.com/pkg/errors"
+	"github.com/cockroachdb/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 
diff --git a/cluster/calcium/lock_test.go b/cluster/calcium/lock_test.go
index 0c11641d4..fc170139f 100644
--- a/cluster/calcium/lock_test.go
+++ b/cluster/calcium/lock_test.go
@@ -27,7 +27,7 @@ func TestDoLock(t *testing.T) {
 	lock := &lockmocks.DistributedLock{}
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 	// lock failed
-	lock.On("Lock", mock.Anything).Return(context.TODO(), types.ErrMockError).Once()
+	lock.On("Lock", mock.Anything).Return(context.Background(), types.ErrMockError).Once()
 	lock.On("Unlock", mock.Anything).Return(nil).Once()
 	_, _, err = c.doLock(ctx, "somename", 1)
 	assert.Error(t, err)
@@ -57,7 +57,7 @@ func TestWithWorkloadsLocked(t *testing.T) {
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 	lock.On("Unlock", mock.Anything).Return(nil)
 	// failed to get lock
-	lock.On("Lock", mock.Anything).Return(context.TODO(), types.ErrMockError).Once()
+	lock.On("Lock", mock.Anything).Return(context.Background(), types.ErrMockError).Once()
 	store.On("GetWorkloads", mock.Anything, mock.Anything).Return([]*types.Workload{{}}, nil).Once()
 	err := c.withWorkloadsLocked(ctx, []string{"c1", "c2"}, func(ctx context.Context, workloads map[string]*types.Workload) error { return nil })
 	assert.Error(t, err)
@@ -90,7 +90,7 @@ func TestWithWorkloadLocked(t *testing.T) {
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 	lock.On("Unlock", mock.Anything).Return(nil)
 	// failed to get lock
-	lock.On("Lock", mock.Anything).Return(context.TODO(), types.ErrMockError).Once()
+	lock.On("Lock", mock.Anything).Return(context.Background(), types.ErrMockError).Once()
 	store.On("GetWorkloads", mock.Anything, mock.Anything).Return([]*types.Workload{{}}, nil).Once()
 	err := c.withWorkloadLocked(ctx, "c1", func(ctx context.Context, workload *types.Workload) error { return nil })
 	assert.Error(t, err)
@@ -155,7 +155,7 @@ func TestWithNodesPodLocked(t *testing.T) {
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 	lock.On("Unlock", mock.Anything).Return(nil)
 	// failed to get lock
-	lock.On("Lock", mock.Anything).Return(context.TODO(), types.ErrMockError).Once()
+	lock.On("Lock", mock.Anything).Return(context.Background(), types.ErrMockError).Once()
 	err = c.withNodesPodLocked(ctx, &types.NodeFilter{Podname: "test", Includes: []string{"test"}, All: false}, func(ctx context.Context, nodes map[string]*types.Node) error { return nil })
 	assert.Error(t, err)
 	lock.On("Lock", mock.Anything).Return(ctx, nil)
@@ -247,7 +247,7 @@ func TestWithNodesOperationLocked(t *testing.T) {
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 	lock.On("Unlock", mock.Anything).Return(nil)
 	// failed to get lock
-	lock.On("Lock", mock.Anything).Return(context.TODO(), types.ErrMockError).Once()
+	lock.On("Lock", mock.Anything).Return(context.Background(), types.ErrMockError).Once()
 	err = c.withNodesOperationLocked(ctx, &types.NodeFilter{Podname: "test", Includes: []string{"test"}, All: false}, func(ctx context.Context, nodes map[string]*types.Node) error { return nil })
 	assert.Error(t, err)
 	lock.On("Lock", mock.Anything).Return(ctx, nil)
diff --git a/cluster/calcium/realloc_test.go b/cluster/calcium/realloc_test.go
index 1d3aa4297..4bda7013a 100644
--- a/cluster/calcium/realloc_test.go
+++ b/cluster/calcium/realloc_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/cockroachdb/errors"
 	enginemocks "github.com/projecteru2/core/engine/mocks"
 	enginetypes "github.com/projecteru2/core/engine/types"
 	lockmocks "github.com/projecteru2/core/lock/mocks"
@@ -52,7 +53,7 @@ func TestRealloc(t *testing.T) {
 		}
 	}
 
-	store.On("GetWorkload", mock.Anything, "c1").Return(newC1(context.TODO(), nil)[0], nil)
+	store.On("GetWorkload", mock.Anything, "c1").Return(newC1(context.Background(), nil)[0], nil)
 	opts := &types.ReallocOptions{
 		ID:           "c1",
 		ResourceOpts: types.WorkloadResourceOpts{},
@@ -61,14 +62,14 @@ func TestRealloc(t *testing.T) {
 	// failed by GetNode
 	store.On("GetNode", mock.Anything, "node1").Return(nil, types.ErrMockError).Once()
 	err := c.ReallocResource(ctx, opts)
-	assert.EqualError(t, err, "ETCD must be set")
+	assert.True(t, errors.Is(err, types.ErrMockError))
 	store.AssertExpectations(t)
 	store.On("GetNode", mock.Anything, "node1").Return(node1, nil)
 
 	// failed by lock
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(nil, types.ErrMockError).Once()
 	err = c.ReallocResource(ctx, opts)
-	assert.EqualError(t, err, "ETCD must be set")
+	assert.True(t, errors.Is(err, types.ErrMockError))
 	store.AssertExpectations(t)
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 	store.On("GetWorkloads", mock.Anything, []string{"c1"}).Return(newC1, nil)
@@ -90,7 +91,7 @@ func TestRealloc(t *testing.T) {
 	// failed by UpdateWorkload
 	store.On("UpdateWorkload", mock.Anything, mock.Anything).Return(types.ErrMockError).Once()
 	err = c.ReallocResource(ctx, opts)
-	assert.EqualError(t, err, "ETCD must be set")
+	assert.True(t, errors.Is(err, types.ErrMockError))
 	store.AssertExpectations(t)
 	store.On("UpdateWorkload", mock.Anything, mock.Anything).Return(nil)
 
diff --git a/cluster/calcium/remap_test.go b/cluster/calcium/remap_test.go
index 6f98bce38..6e382d320 100644
--- a/cluster/calcium/remap_test.go
+++ b/cluster/calcium/remap_test.go
@@ -47,5 +47,5 @@ func TestRemapResource(t *testing.T) {
 	lock.On("Lock", mock.Anything).Return(context.Background(), nil)
 	lock.On("Unlock", mock.Anything).Return(nil)
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
-	c.doRemapResourceAndLog(context.TODO(), log.WithField("test", "zc"), node)
+	c.doRemapResourceAndLog(context.Background(), log.WithField("test", "zc"), node)
 }
diff --git a/cluster/calcium/remove_test.go b/cluster/calcium/remove_test.go
index 7da38a977..e133916ce 100644
--- a/cluster/calcium/remove_test.go
+++ b/cluster/calcium/remove_test.go
@@ -12,7 +12,7 @@ import (
 	storemocks "github.com/projecteru2/core/store/mocks"
 	"github.com/projecteru2/core/types"
 
-	"github.com/pkg/errors"
+	"github.com/cockroachdb/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 )
diff --git a/cluster/calcium/wal_test.go b/cluster/calcium/wal_test.go
index 476993b81..ead40f90f 100644
--- a/cluster/calcium/wal_test.go
+++ b/cluster/calcium/wal_test.go
@@ -44,11 +44,11 @@ func TestHandleCreateWorkloadNoHandle(t *testing.T) {
 	store.On("GetWorkload", mock.Anything, wrkid).Return(wrk, nil).Once()
 	store.On("GetWorkloads", mock.Anything, mock.Anything).Return(nil, nil)
 
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	store.AssertExpectations(t)
 
 	// Recovers nothing.
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 }
 
 func TestHandleCreateWorkloadError(t *testing.T) {
@@ -83,26 +83,26 @@ func TestHandleCreateWorkloadError(t *testing.T) {
 	err = errors.Wrapf(types.ErrInvaildCount, "keys: [%s]", wrkid)
 	store.On("GetWorkload", mock.Anything, mock.Anything).Return(wrk, err).Once()
 	store.On("GetNode", mock.Anything, mock.Anything).Return(nil, err).Once()
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	store.AssertExpectations(t)
 	engine.AssertExpectations(t)
 
 	store.On("GetWorkload", mock.Anything, mock.Anything).Return(wrk, err).Once()
 	store.On("GetNode", mock.Anything, wrk.Nodename).Return(node, nil).Once()
 	engine.On("VirtualizationRemove", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(err).Once()
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	store.AssertExpectations(t)
 	engine.AssertExpectations(t)
 
 	store.On("GetWorkload", mock.Anything, wrkid).Return(wrk, fmt.Errorf("err")).Once()
 	store.On("GetNode", mock.Anything, mock.Anything).Return(node, nil).Once()
 	engine.On("VirtualizationRemove", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(types.ErrWorkloadNotExists).Once()
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	store.AssertExpectations(t)
 	engine.AssertExpectations(t)
 
 	// Nothing recovered.
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 }
 
 func TestHandleCreateWorkloadHandled(t *testing.T) {
@@ -145,12 +145,12 @@ func TestHandleCreateWorkloadHandled(t *testing.T) {
 		Return(nil).
 		Once()
 
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	store.AssertExpectations(t)
 	eng.AssertExpectations(t)
 
 	// Recovers nothing.
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 }
 
 func TestHandleCreateLambda(t *testing.T) {
@@ -191,7 +191,7 @@ func TestHandleCreateLambda(t *testing.T) {
 	store := c.store.(*storemocks.Store)
 	store.On("GetWorkload", mock.Anything, mock.Anything).Return(nil, types.ErrMockError).Once()
 
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	time.Sleep(500 * time.Millisecond)
 	store.AssertExpectations(t)
 
@@ -219,9 +219,9 @@ func TestHandleCreateLambda(t *testing.T) {
 	lock.On("Unlock", mock.Anything).Return(nil)
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	// Recovered nothing.
-	c.wal.Recover(context.TODO())
+	c.wal.Recover(context.Background())
 	time.Sleep(500 * time.Millisecond)
 	store.AssertExpectations(t)
 	eng.AssertExpectations(t)
diff --git a/engine/docker/helper_test.go b/engine/docker/helper_test.go
index aa28eb17e..bbd59293e 100644
--- a/engine/docker/helper_test.go
+++ b/engine/docker/helper_test.go
@@ -29,7 +29,7 @@ func TestWithDumpFiles(t *testing.T) {
 	}
 	fp := []string{}
 	for target, content := range data {
-		withTarfileDump(context.TODO(), target, content, 0, 0, int64(0), func(target, tarfile string) error {
+		withTarfileDump(context.Background(), target, content, 0, 0, int64(0), func(target, tarfile string) error {
 			assert.True(t, strings.HasPrefix(target, "/tmp/test"))
 			fp = append(fp, tarfile)
 			_, err := os.Stat(tarfile)
diff --git a/go.mod b/go.mod
index 8f90f6951..94590d688 100644
--- a/go.mod
+++ b/go.mod
@@ -23,7 +23,6 @@ require (
 	github.com/opencontainers/image-spec v1.0.2
 	github.com/panjf2000/ants/v2 v2.6.0
 	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/pkg/errors v0.9.1
 	github.com/projecteru2/libyavirt v0.0.0-20220621042712-95cdc6363b1c
 	github.com/prometheus/client_golang v1.11.0
 	github.com/rs/zerolog v1.28.0
@@ -99,6 +98,7 @@ require (
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/runc v1.1.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.26.0 // indirect
diff --git a/lock/etcdlock/mutex_test.go b/lock/etcdlock/mutex_test.go
index 8ceaed6f7..67ec0984c 100644
--- a/lock/etcdlock/mutex_test.go
+++ b/lock/etcdlock/mutex_test.go
@@ -31,13 +31,13 @@ func TestMutex(t *testing.T) {
 	m2, err := New(cli, "test", time.Second)
 	assert.NoError(t, err)
-	_, err = m2.Lock(context.TODO())
+	_, err = m2.Lock(context.Background())
 	m3, err := New(cli, "test", 100*time.Millisecond)
 	assert.NoError(t, err)
-	_, err = m3.Lock(context.TODO())
+	_, err = m3.Lock(context.Background())
 	assert.EqualError(t, err, "context deadline exceeded")
-	m2.Unlock(context.TODO())
-	m3.Unlock(context.TODO())
+	m2.Unlock(context.Background())
+	m3.Unlock(context.Background())
 
 	// round 3: ctx canceled after lock secured
 	m4, err := New(cli, "test", time.Second)
@@ -47,7 +47,7 @@ func TestMutex(t *testing.T) {
 	rCtx, err := m4.Lock(ctx)
 	<-rCtx.Done()
 	assert.EqualError(t, rCtx.Err(), "context deadline exceeded")
-	m4.Unlock(context.TODO())
+	m4.Unlock(context.Background())
 
 	// round 4: passive release
 
@@ -75,8 +75,8 @@ func TestTryLock(t *testing.T) {
 	assert.Nil(t, ctx2)
 	assert.Error(t, err)
 
-	assert.NoError(t, m1.Unlock(context.TODO()))
-	assert.NoError(t, m2.Unlock(context.TODO()))
+	assert.NoError(t, m1.Unlock(context.Background()))
+	assert.NoError(t, m2.Unlock(context.Background()))
 
 	// round 2: lock conflict
 
@@ -85,12 +85,12 @@ func TestTryLock(t *testing.T) {
 	m3, err := New(cli, "test", time.Second)
 	assert.NoError(t, err)
 	m4, err := New(cli, "test", time.Second)
 	assert.NoError(t, err)
-	rCtx, err := m3.TryLock(context.TODO())
+	rCtx, err := m3.TryLock(context.Background())
 	assert.NoError(t, err)
-	_, err = m4.TryLock(context.TODO())
+	_, err = m4.TryLock(context.Background())
 	assert.EqualError(t, err, "mutex: Locked by another session")
-	m4.Unlock(context.TODO())
-	m3.Unlock(context.TODO())
+	m4.Unlock(context.Background())
+	m3.Unlock(context.Background())
 	assert.NoError(t, rCtx.Err())
 
 	// round 3: ctx canceled after lock secured
@@ -101,7 +101,7 @@ func TestTryLock(t *testing.T) {
 	rCtx, err = m5.TryLock(ctx)
 	<-rCtx.Done()
 	assert.EqualError(t, rCtx.Err(), "context deadline exceeded")
-	m5.Unlock(context.TODO())
+	m5.Unlock(context.Background())
 
 	// round 4: passive release
 
diff --git a/resources/volume/models/alloc.go b/resources/volume/models/alloc.go
index 3cfe8b807..37838bfeb 100644
--- a/resources/volume/models/alloc.go
+++ b/resources/volume/models/alloc.go
@@ -9,6 +9,7 @@ import (
 	"github.com/projecteru2/core/log"
 	"github.com/projecteru2/core/resources/volume/schedule"
 	"github.com/projecteru2/core/resources/volume/types"
+	coretypes "github.com/projecteru2/core/types"
 	"github.com/projecteru2/core/utils"
 )
 
@@ -92,7 +93,7 @@ func (v *Volume) doAlloc(resourceInfo *types.NodeResourceInfo, deployCount int,
 	if opts.StorageRequest > 0 {
 		storageCapacity := int((resourceInfo.Capacity.Storage - resourceInfo.Usage.Storage) / opts.StorageRequest)
 		if storageCapacity < deployCount {
-			return nil, nil, errors.Wrapf(types.ErrInsufficientResource, "not enough storage, request: %+v, available: %+v", opts.StorageRequest, storageCapacity)
+			return nil, nil, errors.Wrapf(coretypes.ErrInsufficientResource, "not enough storage, request: %+v, available: %+v", opts.StorageRequest, storageCapacity)
 		}
 	}
 
@@ -111,7 +112,7 @@ func (v *Volume) doAlloc(resourceInfo *types.NodeResourceInfo, deployCount int,
 	} else {
 		volumePlans, diskPlans = schedule.GetVolumePlans(resourceInfo, opts.VolumesRequest, v.Config.Scheduler.MaxDeployCount)
 		if len(volumePlans) < deployCount {
-			return nil, nil, errors.Wrapf(types.ErrInsufficientResource, "not enough volume plan, need %+v, available %+v", deployCount, len(volumePlans))
+			return nil, nil, errors.Wrapf(coretypes.ErrInsufficientResource, "not enough volume plan, need %+v, available %+v", deployCount, len(volumePlans))
 		}
 		volumePlans = volumePlans[:deployCount]
 		diskPlans = diskPlans[:deployCount]
diff --git a/resources/volume/models/alloc_test.go b/resources/volume/models/alloc_test.go
index f8c5eff24..9a1cfd8d3 100644
--- a/resources/volume/models/alloc_test.go
+++ b/resources/volume/models/alloc_test.go
@@ -42,7 +42,7 @@ func TestAlloc(t *testing.T) {
 		}),
 	}
 	_, _, err = volume.GetDeployArgs(ctx, node, 2, resourceOpts)
-	assert.ErrorIs(t, err, types.ErrInsufficientResource)
+	assert.ErrorIs(t, err, coretypes.ErrInsufficientResource)
 
 	// normal case
 	resourceOpts = &types.WorkloadResourceOpts{
@@ -61,7 +61,7 @@ func TestAlloc(t *testing.T) {
 		}),
 	}
 	_, _, err = volume.GetDeployArgs(ctx, node, 3, resourceOpts)
-	assert.ErrorIs(t, err, types.ErrInsufficientResource)
+	assert.ErrorIs(t, err, coretypes.ErrInsufficientResource)
 
 	// normal case
 	resourceOpts = &types.WorkloadResourceOpts{
diff --git a/resources/volume/models/info_test.go b/resources/volume/models/info_test.go
index 9b72fbf23..ca8951900 100644
--- a/resources/volume/models/info_test.go
+++ b/resources/volume/models/info_test.go
@@ -4,8 +4,8 @@ import (
 	"context"
 	"testing"
 
+	"github.com/cockroachdb/errors"
 	"github.com/docker/go-units"
-	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
 
 	"github.com/projecteru2/core/resources/volume/types"
diff --git a/resources/volume/models/realloc.go b/resources/volume/models/realloc.go
index c8c57681b..f028de671 100644
--- a/resources/volume/models/realloc.go
+++ b/resources/volume/models/realloc.go
@@ -8,6 +8,7 @@ import (
 	"github.com/projecteru2/core/log"
 	"github.com/projecteru2/core/resources/volume/schedule"
 	"github.com/projecteru2/core/resources/volume/types"
+	coretypes "github.com/projecteru2/core/types"
 	"github.com/projecteru2/core/utils"
 )
 
@@ -45,7 +46,7 @@ func (v *Volume) GetReallocArgs(ctx context.Context, node string, originResource
 	}
 
 	if finalWorkloadResourceArgs.StorageRequest-originResourceArgs.StorageRequest > resourceInfo.Capacity.Storage-resourceInfo.Usage.Storage {
-		return nil, nil, nil, types.ErrInsufficientResource
+		return nil, nil, nil, coretypes.ErrInsufficientResource
 	}
 
 	var volumePlan types.VolumePlan
@@ -53,7 +54,7 @@ func (v *Volume) GetReallocArgs(ctx context.Context, node string, originResource
 	if needVolumeReschedule {
 		volumePlan, diskPlan, err = schedule.GetAffinityPlan(resourceInfo, resourceOpts.VolumesRequest, originResourceArgs.VolumePlanRequest, originResourceArgs.VolumesRequest)
 		if err != nil {
-			return nil, nil, nil, types.ErrInsufficientResource
+			return nil, nil, nil, coretypes.ErrInsufficientResource
 		}
 	} else {
 		volumePlan = originResourceArgs.VolumePlanRequest
diff --git a/resources/volume/models/realloc_test.go b/resources/volume/models/realloc_test.go
index 847f04d2d..e11a83cf4 100644
--- a/resources/volume/models/realloc_test.go
+++ b/resources/volume/models/realloc_test.go
@@ -84,7 +84,7 @@ func TestRealloc(t *testing.T) {
 		StorageLimit:   4 * units.TiB,
 	}
 	_, _, _, err = volume.GetReallocArgs(ctx, node, originResourceArgs, opts)
-	assert.ErrorIs(t, err, types.ErrInsufficientResource)
+	assert.ErrorIs(t, err, coretypes.ErrInsufficientResource)
 
 	// insufficient volume
 	bindings = generateVolumeBindings(t, []string{
@@ -97,7 +97,7 @@ func TestRealloc(t *testing.T) {
 		StorageLimit:   0,
 	}
 	_, _, _, err = volume.GetReallocArgs(ctx, node, originResourceArgs, opts)
-	assert.ErrorIs(t, err, types.ErrInsufficientResource)
+	assert.ErrorIs(t, err, coretypes.ErrInsufficientResource)
 
 	// normal case
 	bindings = generateVolumeBindings(t, []string{
diff --git a/resources/volume/schedule/schedule.go b/resources/volume/schedule/schedule.go
index 29befc327..6a0e10651 100644
--- a/resources/volume/schedule/schedule.go
+++ b/resources/volume/schedule/schedule.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/projecteru2/core/log"
 	"github.com/projecteru2/core/resources/volume/types"
+	coretypes "github.com/projecteru2/core/types"
 	"github.com/projecteru2/core/utils"
 )
 
@@ -136,13 +137,13 @@ func (h *host) getMonoPlan(monoRequests types.VolumeBindings, volume *volume) (t
 
 	// check if volume size is enough
 	if volume.size < totalSize {
-		return nil, nil, types.ErrInsufficientResource
+		return nil, nil, coretypes.ErrInsufficientResource
 	}
 
 	// check if disk IOPS quota is enough
 	disk := h.getDiskByPath(volume.device)
 	if !h.isDiskIOPSQuotaQualified(disk, &types.VolumeBinding{SizeInBytes: totalSize, ReadIOPS: totalReadIOPS, WriteIOPS: totalWriteIOPS, ReadBPS: totalReadBPS, WriteBPS: totalWriteBPS}) {
-		return nil, nil, types.ErrInsufficientResource
+		return nil, nil, coretypes.ErrInsufficientResource
 	}
 
 	volumePlan := types.VolumePlan{}
@@ -252,7 +253,7 @@ func (h *host) getNormalPlan(normalRequests types.VolumeBindings) (types.VolumeP
 			heap.Push(&volumeHeap, volume)
 		}
 		if !allocated {
-			return nil, nil, types.ErrInsufficientResource
+			return nil, nil, coretypes.ErrInsufficientResource
 		}
 	}
 
@@ -296,7 +297,7 @@ func (h *host) getUnlimitedPlans(normalPlans, monoPlans []types.VolumePlan, unli
 	}
 	volumes := append(h.usedVolumes.DeepCopy(), h.unusedVolumes.DeepCopy()...)
 	if len(volumes) == 0 {
-		return nil, types.ErrInsufficientResource
+		return nil, coretypes.ErrInsufficientResource
 	}
 	volumeMap := map[string]*volume{}
 	for _, volume := range volumes {
@@ -353,7 +354,7 @@ func (h *host) getMountDiskPlan(reqs types.VolumeBindings) (types.Disks, error)
 	for _, req := range reqs {
 		disk := h.getDiskByPath(req.Source)
 		if !h.isDiskIOPSQuotaQualified(disk, req) {
-			return nil, types.ErrInsufficientResource
+			return nil, coretypes.ErrInsufficientResource
 		}
 		h.decreaseIOPSQuota(disk, req)
 		diskPlan.Add(types.Disks{&types.Disk{
@@ -548,8 +549,8 @@ func (h *host) getAffinityPlan(requests types.VolumeBindings, originVolumePlan t
 			// check if the device has enough space
 			volume := h.getVolumeByDevice(device)
 			if req.SizeInBytes > volume.size {
-				log.Errorf(nil, types.ErrInsufficientResource, "[getAffinityPlan] no space to expand, %+v remains %+v, requires %+v", device, volume.size, req.SizeInBytes) //nolint
-				return types.ErrInsufficientResource
+				log.Errorf(nil, coretypes.ErrInsufficientResource, "[getAffinityPlan] no space to expand, %+v remains %+v, requires %+v", device, volume.size, req.SizeInBytes) //nolint
+				return coretypes.ErrInsufficientResource
 			}
 			volume.size -= req.SizeInBytes
 			volumePlan.Merge(types.VolumePlan{req: types.VolumeMap{volume.device: req.SizeInBytes}})
@@ -560,8 +561,8 @@ func (h *host) getAffinityPlan(requests types.VolumeBindings, originVolumePlan t
 			}
 			disk := h.getDiskByPath(device)
 			if !h.isDiskIOPSQuotaQualified(disk, req) {
-				log.Errorf(nil, types.ErrInsufficientResource, "[getAffinityPlan] no IOPS quota to expand, %+v remains %+v, requires %+v", device, disk, req) //nolint
-				return types.ErrInsufficientResource
+				log.Errorf(nil, coretypes.ErrInsufficientResource, "[getAffinityPlan] no IOPS quota to expand, %+v remains %+v, requires %+v", device, disk, req) //nolint
+				return coretypes.ErrInsufficientResource
 			}
 			h.decreaseIOPSQuota(disk, req)
 			diskPlan.Add(types.Disks{&types.Disk{
@@ -612,8 +613,8 @@ func (h *host) getAffinityPlan(requests types.VolumeBindings, originVolumePlan t
 		// if there is any affinity plan: don't reschedule
 		// use the first volume map to get the whole mono volume plan
 		if totalVolumeSize < totalRequestSize { // check if the volume size is enough
-			log.Errorf(nil, types.ErrInsufficientResource, "[getAffinityPlan] no space to expand, the size of %+v is %+v, requires %+v", affinity[monoRequests[0]].GetDevice(), totalVolumeSize, totalRequestSize) //nolint
-			return nil, nil, types.ErrInsufficientResource
+			log.Errorf(nil, coretypes.ErrInsufficientResource, "[getAffinityPlan] no space to expand, the size of %+v is %+v, requires %+v", affinity[monoRequests[0]].GetDevice(), totalVolumeSize, totalRequestSize) //nolint
+			return nil, nil, coretypes.ErrInsufficientResource
 		}
 
 		var volume *volume
@@ -648,7 +649,7 @@ func (h *host) getAffinityPlan(requests types.VolumeBindings, originVolumePlan t
 	volumePlans, diskPlans := h.getVolumePlans(needRescheduleRequests)
 	if len(volumePlans) == 0 {
-		return nil, nil, types.ErrInsufficientResource
+		return nil, nil, coretypes.ErrInsufficientResource
 	}
 	volumePlan.Merge(volumePlans[0])
 	diskPlan.Add(diskPlans[0])
diff --git a/resources/volume/types/errors.go b/resources/volume/types/errors.go
index 82728055f..2f03fe2b8 100644
--- a/resources/volume/types/errors.go
+++ b/resources/volume/types/errors.go
@@ -3,9 +3,8 @@ package types
 import "github.com/cockroachdb/errors"
 
 var (
-	ErrInvalidCapacity      = errors.New("invalid capacity")
-	ErrInvalidVolume        = errors.New("invalid volume")
-	ErrInsufficientResource = errors.New("cannot alloc a plan, not enough resource")
-	ErrInvalidStorage       = errors.New("invalid storage")
-	ErrInvalidDisk          = errors.New("invalid disk")
+	ErrInvalidCapacity = errors.New("invalid capacity")
+	ErrInvalidVolume   = errors.New("invalid volume")
+	ErrInvalidStorage  = errors.New("invalid storage")
+	ErrInvalidDisk     = errors.New("invalid disk")
 )
diff --git a/rpc/counter_test.go b/rpc/counter_test.go
index f31e31514..879b38c32 100644
--- a/rpc/counter_test.go
+++ b/rpc/counter_test.go
@@ -9,7 +9,7 @@ import (
 
 func TestCounter(t *testing.T) {
 	v := Vibranium{}
-	task := v.newTask(context.TODO(), "test", true)
+	task := v.newTask(context.Background(), "test", true)
 	assert.Equal(t, v.TaskNum, 1)
 	task.done()
 
diff --git a/source/common/common_test.go b/source/common/common_test.go
index 8d29337a2..25c926847 100644
--- a/source/common/common_test.go
+++ b/source/common/common_test.go
@@ -146,14 +146,14 @@ func TestArtifact(t *testing.T) {
 		res.Write(data)
 	}))
 	defer func() { testServer.Close() }()
-	err = g.Artifact(context.TODO(), "invaildurl", savedDir)
+	err = g.Artifact(context.Background(), "invaildurl", savedDir)
 	assert.Error(t, err)
 	// no header
-	err = g.Artifact(context.TODO(), testServer.URL, savedDir)
+	err = g.Artifact(context.Background(), testServer.URL, savedDir)
 	assert.Error(t, err)
 	// vaild
 	g.AuthHeaders = map[string]string{"TEST": authValue}
-	err = g.Artifact(context.TODO(), testServer.URL, savedDir)
+	err = g.Artifact(context.Background(), testServer.URL, savedDir)
 	assert.NoError(t, err)
 
 	fname := filepath.Join(savedDir, path.Base(origFile.Name()))
diff --git a/store/etcdv3/meta/etcd_test.go b/store/etcdv3/meta/etcd_test.go
index 5a3ab4894..176d1257b 100644
--- a/store/etcdv3/meta/etcd_test.go
+++ b/store/etcdv3/meta/etcd_test.go
@@ -104,7 +104,7 @@ func TestBindStatusButEntityTxnUnsuccessful(t *testing.T) {
 	etcd.On("Grant", mock.Anything, mock.Anything).Return(&clientv3.LeaseGrantResponse{}, nil)
 	etcd.On("Txn", mock.Anything).Return(txn)
 
-	require.Equal(t, types.ErrMockError, e.BindStatus(context.Background(), "/entity", "/status", "status", 1))
+	require.Equal(t, types.ErrInvaildCount, e.BindStatus(context.Background(), "/entity", "/status", "status", 1))
 }
 
 func TestBindStatusButStatusTxnUnsuccessful(t *testing.T) {
@@ -337,7 +337,7 @@ func TestETCD(t *testing.T) {
 	require.True(t, r.Succeeded)
 	// UpdateFail
 	r, err = m.Update(ctx, "test/3", "b")
-	require.EqualError(t, err, "Key not exists")
+	require.EqualError(t, err, "key not exists")
 	require.False(t, r.Succeeded)
 	// BatchUpdate
 	data = map[string]string{
@@ -353,7 +353,7 @@ func TestETCD(t *testing.T) {
 		"k3": "b2",
 	}
 	r, err = m.BatchUpdate(ctx, data)
-	require.EqualError(t, err, "Key not exists")
+	require.EqualError(t, err, "key not exists")
 	require.False(t, r.Succeeded)
 	// Watch
 	ctx2, cancel := context.WithCancel(ctx)
@@ -374,54 +374,54 @@ func TestETCD(t *testing.T) {
 		"bcad_k1": "v1",
 		"bcad_k2": "v1",
 	}
-	err = m.BatchCreateAndDecr(context.TODO(), data, "bcad_process")
-	require.EqualError(t, err, "Key not exists: bcad_process")
+	err = m.BatchCreateAndDecr(context.Background(), data, "bcad_process")
+	require.EqualError(t, err, "bcad_process: key not exists")
 	// BatchCreateAndDecr error
-	_, err = m.Put(context.TODO(), "bcad_process", "a")
+	_, err = m.Put(context.Background(), "bcad_process", "a")
 	require.NoError(t, err)
-	err = m.BatchCreateAndDecr(context.TODO(), data, "bcad_process")
+	err = m.BatchCreateAndDecr(context.Background(), data, "bcad_process")
 	require.EqualError(t, err, "strconv.Atoi: parsing \"a\": invalid syntax")
 	// BatchCreateAndDecr success
-	_, err = m.Put(context.TODO(), "bcad_process", "20")
+	_, err = m.Put(context.Background(), "bcad_process", "20")
 	require.NoError(t, err)
-	err = m.BatchCreateAndDecr(context.TODO(), data, "bcad_process")
+	err = m.BatchCreateAndDecr(context.Background(), data, "bcad_process")
 	require.NoError(t, err)
-	resp, err = m.Get(context.TODO(), "bcad_process")
+	resp, err = m.Get(context.Background(), "bcad_process")
 	require.NoError(t, err)
 	processCnt, err := strconv.Atoi(string(resp.Kvs[0].Value))
 	require.NoError(t, err)
 	require.EqualValues(t, 19, processCnt)
 	// BatchCreateAndDecr concurrency
-	_, err = m.Put(context.TODO(), "bcad_process", "200")
+	_, err = m.Put(context.Background(), "bcad_process", "200")
 	require.NoError(t, err)
 	wg := sync.WaitGroup{}
 	for i := 0; i < 200; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			m.BatchCreateAndDecr(context.TODO(), data, "bcad_process")
+			m.BatchCreateAndDecr(context.Background(), data, "bcad_process")
 		}()
 	}
 	wg.Wait()
-	resp, err = m.Get(context.TODO(), "bcad_process")
+	resp, err = m.Get(context.Background(), "bcad_process")
 	require.NoError(t, err)
 	processCnt, err = strconv.Atoi(string(resp.Kvs[0].Value))
 	require.NoError(t, err)
 	require.EqualValues(t, 0, processCnt)
 	// doBatchOp error
-	_, err = m.doBatchOp(context.TODO(), nil)
-	require.EqualError(t, err, "No txn ops")
+	_, err = m.doBatchOp(context.Background(), nil)
+	require.EqualError(t, err, "no txn ops")
 	// doBatchOp: many groups
 	txnes := []ETCDTxn{}
 	for i := 0; i < 999; i++ {
 		txnes = append(txnes, ETCDTxn{Then: []clientv3.Op{clientv3.OpGet("a")}})
 	}
-	txnResp, err := m.doBatchOp(context.TODO(), txnes)
+	txnResp, err := m.doBatchOp(context.Background(), txnes)
 	require.NoError(t, err)
 	require.True(t, txnResp.Succeeded)
 	require.EqualValues(t, 999, len(txnResp.Responses))
@@ -432,7 +432,7 @@ func TestETCD(t *testing.T) {
 		txnes[0].Then = append(txnes[0].Then, clientv3.OpGet("a"))
 		txnes[1].Then = append(txnes[1].Then, clientv3.OpGet("a"), clientv3.OpGet("b"))
 	}
-	txnResp, err = m.doBatchOp(context.TODO(), txnes)
+	txnResp, err = m.doBatchOp(context.Background(), txnes)
 	require.NoError(t, err)
 	require.True(t, txnResp.Succeeded)
 	require.EqualValues(t, 999*3, len(txnResp.Responses))
@@ -441,19 +441,19 @@ func TestETCD(t *testing.T) {
 	txnes = []ETCDTxn{{If: []clientv3.Cmp{
 		clientv3.Compare(clientv3.Value("a"), "=", string("123")),
 	}}}
-	txnResp, err = m.doBatchOp(context.TODO(), txnes)
+	txnResp, err = m.doBatchOp(context.Background(), txnes)
 	require.NoError(t, err)
 	require.False(t, txnResp.Succeeded)
 	require.EqualValues(t, 0, len(txnResp.Responses))
 	// GetMulti error
-	_, err = m.GetMulti(context.TODO(), []string{"a", "b"})
-	require.EqualError(t, err, "bad `Count` value: key: a")
+	_, err = m.GetMulti(context.Background(), []string{"a", "b"})
+	require.EqualError(t, err, "key: a: bad `Count` value, entity count invaild")
 	// GetMulti success
-	m.Put(context.TODO(), "a", "b")
-	m.Put(context.TODO(), "b", "c")
-	kvs, err := m.GetMulti(context.TODO(), []string{"a", "b"})
+	m.Put(context.Background(), "a", "b")
+	m.Put(context.Background(), "b", "c")
+	kvs, err := m.GetMulti(context.Background(), []string{"a", "b"})
 	require.NoError(t, err)
 	require.EqualValues(t, 2, len(kvs))
 
@@ -466,9 +466,9 @@ func TestETCD(t *testing.T) {
 		"aa": {cmpValue: "!="},
 		"cc": {cmpValue: "!="},
 	}
-	m.Put(context.TODO(), "aa", "aa")
-	m.Put(context.TODO(), "cc", "cc")
-	txnResp, err = m.batchPut(context.TODO(), data, limit)
+	m.Put(context.Background(), "aa", "aa")
+	m.Put(context.Background(), "cc", "cc")
+	txnResp, err = m.batchPut(context.Background(), data, limit)
 	require.NoError(t, err)
 	require.True(t, txnResp.Succeeded)
 }
diff --git a/store/etcdv3/node_test.go b/store/etcdv3/node_test.go
index f787a819f..58926050e 100644
--- a/store/etcdv3/node_test.go
+++ b/store/etcdv3/node_test.go
@@ -205,23 +205,23 @@ func TestSetNodeStatus(t *testing.T) {
 			Podname:  "testpod",
 		},
 	}
-	_, err := m.AddPod(context.TODO(), node.Podname, "")
+	_, err := m.AddPod(context.Background(), node.Podname, "")
 	assert.NoError(err)
-	_, err = m.AddNode(context.TODO(), &types.AddNodeOptions{
+	_, err = m.AddNode(context.Background(), &types.AddNodeOptions{
 		Nodename: node.Name,
 		Endpoint: node.Endpoint,
 		Podname:  node.Podname,
 	})
 	assert.NoError(err)
-	assert.NoError(m.SetNodeStatus(context.TODO(), node, 1))
+	assert.NoError(m.SetNodeStatus(context.Background(), node, 1))
 	key := filepath.Join(nodeStatusPrefix, node.Name)
 
 	// not expired yet
-	_, err = m.GetOne(context.TODO(), key)
+	_, err = m.GetOne(context.Background(), key)
 	assert.NoError(err)
 
 	// expired
 	time.Sleep(2000 * time.Millisecond)
-	_, err = m.GetOne(context.TODO(), key)
+	_, err = m.GetOne(context.Background(), key)
 	assert.Error(err)
 }
 
@@ -236,24 +236,24 @@ func TestGetNodeStatus(t *testing.T) {
 			Podname:  "testpod",
 		},
 	}
-	_, err := m.AddPod(context.TODO(), node.Podname, "")
+	_, err := m.AddPod(context.Background(), node.Podname, "")
 	assert.NoError(err)
-	_, err = m.AddNode(context.TODO(), &types.AddNodeOptions{
+	_, err = m.AddNode(context.Background(), &types.AddNodeOptions{
 		Nodename: node.Name,
 		Endpoint: node.Endpoint,
 		Podname:  node.Podname,
 	})
 	assert.NoError(err)
-	assert.NoError(m.SetNodeStatus(context.TODO(), node, 1))
+	assert.NoError(m.SetNodeStatus(context.Background(), node, 1))
 	// not expired yet
-	ns, err := m.GetNodeStatus(context.TODO(), node.Name)
+	ns, err := m.GetNodeStatus(context.Background(), node.Name)
 	assert.NoError(err)
 	assert.Equal(ns.Nodename, node.Name)
 	assert.True(ns.Alive)
 
 	// expired
 	time.Sleep(2 * time.Second)
-	ns1, err := m.GetNodeStatus(context.TODO(), node.Name)
+	ns1, err := m.GetNodeStatus(context.Background(), node.Name)
 	assert.Error(err)
 	assert.Nil(ns1)
 }
@@ -270,9 +270,9 @@ func TestNodeStatusStream(t *testing.T) {
 		},
 	}
 
-	_, err := m.AddPod(context.TODO(), node.Podname, "")
+	_, err := m.AddPod(context.Background(), node.Podname, "")
 	assert.NoError(err)
-	_, err = m.AddNode(context.TODO(), &types.AddNodeOptions{
+	_, err = m.AddNode(context.Background(), &types.AddNodeOptions{
 		Nodename: node.Name,
 		Endpoint: node.Endpoint,
 		Podname:  node.Podname,
@@ -289,7 +289,7 @@ func TestNodeStatusStream(t *testing.T) {
 			default:
 			}
 			time.Sleep(500 * time.Millisecond)
-			assert.NoError(m.SetNodeStatus(context.TODO(), node, 1))
+			assert.NoError(m.SetNodeStatus(context.Background(), node, 1))
 		}
 	}()
 
diff --git a/store/redis/node_test.go b/store/redis/node_test.go
index 850d5af9c..f5fa77514 100644
--- a/store/redis/node_test.go
+++ b/store/redis/node_test.go
@@ -174,17 +174,17 @@ func (s *RediaronTestSuite) TestSetNodeStatus() {
 			Podname:  "testpod",
 		},
 	}
-	s.NoError(s.rediaron.SetNodeStatus(context.TODO(), node, 1))
+	s.NoError(s.rediaron.SetNodeStatus(context.Background(), node, 1))
 	key := filepath.Join(nodeStatusPrefix, node.Name)
 
 	// not expired yet
-	_, err := s.rediaron.GetOne(context.TODO(), key)
+	_, err := s.rediaron.GetOne(context.Background(), key)
 	s.NoError(err)
 
 	// expired
 	time.Sleep(2 * time.Second)
 	// fastforward
 	s.rediserver.FastForward(2 * time.Second)
-	_, err = s.rediaron.GetOne(context.TODO(), key)
+	_, err = s.rediaron.GetOne(context.Background(), key)
 	s.Error(err)
 }
 
@@ -196,10 +196,10 @@ func (s *RediaronTestSuite) TestGetNodeStatus() {
 			Podname:  "testpod",
 		},
 	}
-	s.NoError(s.rediaron.SetNodeStatus(context.TODO(), node, 1))
+	s.NoError(s.rediaron.SetNodeStatus(context.Background(), node, 1))
 
 	// not expired yet
-	ns, err := s.rediaron.GetNodeStatus(context.TODO(), node.Name)
+	ns, err := s.rediaron.GetNodeStatus(context.Background(), node.Name)
 	s.NoError(err)
 	s.Equal(ns.Nodename, node.Name)
 	s.True(ns.Alive)
@@ -207,7 +207,7 @@ func (s *RediaronTestSuite) TestGetNodeStatus() {
 	time.Sleep(2 * time.Second)
 	// fastforward
 	s.rediserver.FastForward(2 * time.Second)
-	ns1, err := s.rediaron.GetNodeStatus(context.TODO(), node.Name)
+	ns1, err := s.rediaron.GetNodeStatus(context.Background(), node.Name)
 	s.Error(err)
 	s.Nil(ns1)
 }
@@ -231,7 +231,7 @@ func (s *RediaronTestSuite) TestNodeStatusStream() {
 			default:
 			}
 			time.Sleep(500 * time.Millisecond)
-			s.NoError(s.rediaron.SetNodeStatus(context.TODO(), node, 1))
+			s.NoError(s.rediaron.SetNodeStatus(context.Background(), node, 1))
 			// manually trigger
 			triggerMockedKeyspaceNotification(s.rediaron.cli, filepath.Join(nodeStatusPrefix, node.Name), actionSet)
 		}
diff --git a/strategy/average_test.go b/strategy/average_test.go
index eee53ce36..cf7e285b9 100644
--- a/strategy/average_test.go
+++ b/strategy/average_test.go
@@ -5,13 +5,15 @@ import (
 	"sort"
 	"testing"
 
+	"github.com/cockroachdb/errors"
+	"github.com/projecteru2/core/types"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestAveragePlan(t *testing.T) {
 	// 正常的
 	nodes := deployedNodes()
-	r, err := AveragePlan(context.TODO(), nodes, 1, 0, 0)
+	r, err := AveragePlan(context.Background(), nodes, 1, 0, 0)
 	assert.NoError(t, err)
 	finalCounts := []int{}
 	for _, node := range nodes {
@@ -22,23 +24,23 @@ func TestAveragePlan(t *testing.T) {
 
 	// nodes len < limit
 	nodes = deployedNodes()
-	_, err = AveragePlan(context.TODO(), nodes, 100, 0, 5)
+	_, err = AveragePlan(context.Background(), nodes, 100, 0, 5)
 	assert.Error(t, err)
 
 	// 超过 cap
 	nodes = deployedNodes()
-	_, err = AveragePlan(context.TODO(), nodes, 100, 0, 0)
+	_, err = AveragePlan(context.Background(), nodes, 100, 0, 0)
 	assert.Error(t, err)
-	assert.Contains(t, err.Error(), "not enough capacity")
+	assert.True(t, errors.Is(err, types.ErrInsufficientCapacity))
 
 	// 正常 limit
 	nodes = deployedNodes()
-	_, err = AveragePlan(context.TODO(), nodes, 1, 1, 1)
+	_, err = AveragePlan(context.Background(), nodes, 1, 1, 1)
 	assert.NoError(t, err)
 
 	nodes = genNodesByCapCount([]int{1, 2, 3, 4, 5}, []int{3, 3, 3, 3, 3})
-	_, err = AveragePlan(context.TODO(), nodes, 4, 100, 4)
-	assert.EqualError(t, err, "not enough resource: not enough nodes with capacity of 4, require 4 nodes")
+	_, err = AveragePlan(context.Background(), nodes, 4, 100, 4)
+	assert.Contains(t, err.Error(), "not enough nodes with capacity of 4, require 4 nodes")
 
 	nodes = genNodesByCapCount([]int{1, 2, 3, 4, 5}, []int{3, 3, 3, 3, 3})
-	_, err = AveragePlan(context.TODO(), nodes, 2, 100, 0)
-	assert.EqualError(t, err, "not enough resource: not enough nodes with capacity of 2, require 5 nodes")
+	_, err = AveragePlan(context.Background(), nodes, 2, 100, 0)
+	assert.Contains(t, err.Error(), "not enough nodes with capacity of 2, require 5 nodes")
 }
diff --git a/strategy/communism_test.go b/strategy/communism_test.go
index 26400dbad..fd7d91157 100644
--- a/strategy/communism_test.go
+++ b/strategy/communism_test.go
@@ -11,37 +11,75 @@ import (
 
 func TestCommunismPlan(t *testing.T) {
 	nodes := deployedNodes()
-	r, err := CommunismPlan(context.TODO(), nodes, 1, 100, 0)
+	r, err := CommunismPlan(context.Background(), nodes, 1, 100, 0)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, []int{3, 3, 5, 7}, getFinalStatus(r, nodes))
 
-	r, err = CommunismPlan(context.TODO(), nodes, 2, 1, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 2, 1, 0)
 	assert.Error(t, err)
 
-	r, err = CommunismPlan(context.TODO(), nodes, 2, 100, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 2, 100, 0)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, []int{3, 4, 5, 7}, getFinalStatus(r, nodes))
 
-	r, err = CommunismPlan(context.TODO(), nodes, 3, 100, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 3, 100, 0)
 	assert.ElementsMatch(t, []int{4, 4, 5, 7}, getFinalStatus(r, nodes))
 
-	r, err = CommunismPlan(context.TODO(), nodes, 4, 100, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 4, 100, 0)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, []int{4, 5, 5, 7}, getFinalStatus(r, nodes))
 
-	r, err = CommunismPlan(context.TODO(), nodes, 29, 100, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 29, 100, 0)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, []int{11, 11, 12, 12}, getFinalStatus(r, nodes))
 
-	r, err = CommunismPlan(context.TODO(), nodes, 37, 100, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 37, 100, 0)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, []int{12, 13, 14, 15}, getFinalStatus(r, nodes))
 
-	r, err = CommunismPlan(context.TODO(), nodes, 40, 100, 0)
+	r, err = CommunismPlan(context.Background(), nodes, 40, 100, 0)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, []int{12, 13, 15, 17}, getFinalStatus(r, nodes))
 }
 
+func TestCommunismPlanCapacityPriority(t *testing.T) {
+	nodes := genNodesByCapCount([]int{1, 2, 1, 5, 10}, []int{0, 0, 0, 0, 0})
+	deploy, err := CommunismPlan(context.Background(), nodes, 3, 15, 0)
+	assert.Nil(t, err)
+	assert.ElementsMatch(t, []int{0, 0, 1, 1, 1}, getFinalStatus(deploy, nodes))
+	assert.EqualValues(t, 1, deploy["1"])
+	assert.EqualValues(t, 1, deploy["3"])
+	assert.EqualValues(t, 1, deploy["4"])
+
+	nodes = genNodesByCapCount([]int{10, 4, 4}, []int{1, 1, 10})
+	deploy, err = CommunismPlan(context.Background(), nodes, 5, 100, 0)
+	assert.Nil(t, err)
+	assert.ElementsMatch(t, []int{3, 4, 10}, getFinalStatus(deploy, nodes))
+	assert.EqualValues(t, 3, deploy["0"])
+	assert.EqualValues(t, 2, deploy["1"])
+
+	nodes = genNodesByCapCount([]int{4, 5, 4, 10}, []int{2, 2, 4, 0})
+	deploy, err = CommunismPlan(context.Background(), nodes, 3, 100, 0)
+	assert.Nil(t, err)
+	assert.ElementsMatch(t, []int{2, 2, 3, 4}, getFinalStatus(deploy, nodes))
+	assert.EqualValues(t, 3, deploy["3"])
+
+	nodes = genNodesByCapCount([]int{3, 4, 5, 10}, []int{0, 0, 0, 0})
+	deploy, err = CommunismPlan(context.Background(), nodes, 3, 100, 0)
+	assert.Nil(t, err)
+	assert.ElementsMatch(t, []int{0, 1, 1, 1}, getFinalStatus(deploy, nodes))
+	assert.EqualValues(t, 1, deploy["3"])
+	assert.EqualValues(t, 1, deploy["2"])
+	assert.EqualValues(t, 1, deploy["1"])
+
+	// test limit
+	nodes = genNodesByCapCount([]int{3, 4, 5, 10}, []int{3, 5, 7, 10})
+	deploy, err = CommunismPlan(context.Background(), nodes, 3, 10, 5)
+	assert.Contains(t, err.Error(), "reached nodelimit, a node can host at most 5 instances")
+	deploy, err = CommunismPlan(context.Background(), nodes, 3, 10, 6)
+	assert.Nil(t, err)
+}
+
 //
 //func randomDeployStatus(scheduleInfos []resourcetypes.ScheduleInfo, maxDeployed int) (sis []Info) {
 //	s := rand.NewSource(int64(1024))
@@ -55,25 +93,24 @@ func TestCommunismPlan(t *testing.T) {
 //	return
 //}
 
-//
-//func Benchmark_CommunismPlan(b *testing.B) {
-//	b.StopTimer()
-//	var count = 10000
-//	var maxDeployed = 1024
-//	var volTotal = maxDeployed * count
-//	var need = volTotal - 1
-//	// Simulate `count` nodes with difference deploy status, each one can deploy `maxDeployed` workloads
-//	// and then we deploy `need` workloads
-//	for i := 0; i < b.N; i++ {
-//		// 24 core, 128G memory, 10 pieces per core
-//		t := utils.GenerateScheduleInfos(count, 1, 1, 0, 10)
-//		hugePod := randomDeployStatus(t, maxDeployed)
-//		b.StartTimer()
-//		_, err := CommunismPlan(context.TODO(), hugePod, need, 100, 0)
-//		b.StopTimer()
-//		assert.NoError(b, err)
+// func Benchmark_CommunismPlan(b *testing.B) {
+// 	b.StopTimer()
+// 	var count = 10000
+// 	var maxDeployed = 1024
+// 	var volTotal = maxDeployed * count
+// 	var need = volTotal - 1
+// 	// Simulate `count` nodes with difference deploy status, each one can deploy `maxDeployed` workloads
+// 	// and then we deploy `need` workloads
+// 	for i := 0; i < b.N; i++ {
+// 		// 24 core, 128G memory, 10 pieces per core
+// 		t := utils.GenerateScheduleInfos(count, 1, 1, 0, 10)
+// 		hugePod := randomDeployStatus(t, maxDeployed)
+// 		b.StartTimer()
+// 		_, err := CommunismPlan(context.Background(), hugePod, need, 100, 0)
+// 		b.StopTimer()
+// 		assert.NoError(b, err)
+// 	}
 // }
-//}
 
 func genNodesByCapCount(caps, counts []int) (infos []Info) {
 	for i := range caps {
 		infos = append(infos, Info{
@@ -92,42 +129,3 @@ func getFinalStatus(deploy map[string]int, infos []Info) (counts []int) {
 	sort.Ints(counts)
 	return
 }
-
-func TestCommunismPlanCapacityPriority(t *testing.T) {
-
-	nodes := genNodesByCapCount([]int{1, 2, 1, 5, 10}, []int{0, 0, 0, 0, 0})
-	deploy, err := CommunismPlan(context.TODO(), nodes, 3, 15, 0)
-	assert.Nil(t, err)
-	assert.ElementsMatch(t, []int{0, 0, 1, 1, 1}, getFinalStatus(deploy, nodes))
-	assert.EqualValues(t, 1, deploy["1"])
-	assert.EqualValues(t, 1, deploy["3"])
-	assert.EqualValues(t, 1, deploy["4"])
-
-	nodes = genNodesByCapCount([]int{10, 4, 4}, []int{1, 1, 10})
-	deploy, err = CommunismPlan(context.TODO(), nodes, 5, 100, 0)
-	assert.Nil(t, err)
-	assert.ElementsMatch(t, []int{3, 4, 10}, getFinalStatus(deploy, nodes))
-	assert.EqualValues(t, 3, deploy["0"])
-	assert.EqualValues(t, 2, deploy["1"])
-
-	nodes = genNodesByCapCount([]int{4, 5, 4, 10}, []int{2, 2, 4, 0})
-	deploy, err = CommunismPlan(context.TODO(), nodes, 3, 100, 0)
-	assert.Nil(t, err)
-	assert.ElementsMatch(t, []int{2, 2, 3, 4}, getFinalStatus(deploy, nodes))
-	assert.EqualValues(t, 3, deploy["3"])
-
-	nodes = genNodesByCapCount([]int{3, 4, 5, 10}, []int{0, 0, 0, 0})
-	deploy, err = CommunismPlan(context.TODO(), nodes, 3, 100, 0)
-	assert.Nil(t, err)
-	assert.ElementsMatch(t, []int{0, 1, 1, 1}, getFinalStatus(deploy, nodes))
-	assert.EqualValues(t, 1, deploy["3"])
-	assert.EqualValues(t, 1, deploy["2"])
-	assert.EqualValues(t, 1, deploy["1"])
-
-	// test limit
-	nodes = genNodesByCapCount([]int{3, 4, 5, 10}, []int{3, 5, 7, 10})
-	deploy, err = CommunismPlan(context.TODO(), nodes, 3, 10, 5)
-	assert.EqualError(t, err, "reached nodelimit, a node can host at most 5 instances: not enough resource")
-	deploy, err = CommunismPlan(context.TODO(), nodes, 3, 10, 6)
-	assert.Nil(t, err)
-}
diff --git a/strategy/fill_test.go b/strategy/fill_test.go
index 91c4ce763..1b4da9381 100644
--- a/strategy/fill_test.go
+++ b/strategy/fill_test.go
@@ -15,7 +15,7 @@ func TestFillPlan(t *testing.T) {
 	// 正常的全量补充
 	n := 10
 	nodes := deployedNodes()
-	r, err := FillPlan(context.TODO(), nodes, n, 0, 0)
+	r, err := FillPlan(context.Background(), nodes, n, 0, 0)
 	assert.NoError(t, err)
 	finalCounts := []int{}
 	for _, node := range nodes {
@@ -27,7 +27,7 @@ func TestFillPlan(t *testing.T) {
 	// 局部补充
 	n = 5
 	nodes = deployedNodes()
-	r, err = FillPlan(context.TODO(), nodes, n, 0, 0)
+	r, err = FillPlan(context.Background(), nodes, n, 0, 0)
 	assert.NoError(t, err)
 	finalCounts = []int{}
 	for _, node := range nodes {
@@ -39,20 +39,20 @@ func TestFillPlan(t *testing.T) {
 	// 局部补充不能
 	n = 15
 	nodes = deployedNodes()
-	_, err = FillPlan(context.TODO(), nodes, n, 0, 0)
+	_, err = FillPlan(context.Background(), nodes, n, 0, 0)
 	assert.True(t, errors.Is(err, types.ErrInsufficientResource))
 
 	// 全局补充不能
 	n = 1
 	nodes = deployedNodes()
-	_, err = FillPlan(context.TODO(), nodes, n, 0, 0)
+	_, err = FillPlan(context.Background(), nodes, n, 0, 0)
 	assert.Error(t, err)
 	assert.Contains(t, err.Error(), "each node has enough workloads")
 
 	// LimitNode
 	n = 10
 	nodes = deployedNodes()
-	_, err = FillPlan(context.TODO(), nodes, n, 0, 2)
+	_, err = FillPlan(context.Background(), nodes, n, 0, 2)
 	assert.NoError(t, err)
 
 	// 局部补充
@@ -70,18 +70,18 @@ func TestFillPlan(t *testing.T) {
 		},
 	}
 
-	_, err = FillPlan(context.TODO(), nodes, n, 0, 3)
+	_, err = FillPlan(context.Background(), nodes, n, 0, 3)
 	assert.Error(t, err)
 	assert.Contains(t, err.Error(), "cannot alloc a fill node plan")
 
 	nodes = genNodesByCapCount([]int{1, 2, 3, 4, 5}, []int{3, 3, 3, 3, 3})
-	r, err = FillPlan(context.TODO(), nodes, 4, 0, 3)
+	r, err = FillPlan(context.Background(), nodes, 4, 0, 3)
 	assert.Nil(t, err)
 	assert.ElementsMatch(t, []int{3, 3, 4, 4, 4}, getFinalStatus(r, nodes))
 	assert.EqualValues(t, 1, r["4"])
 	assert.EqualValues(t, 1, r["3"])
 	assert.EqualValues(t, 1, r["2"])
 
-	_, err = FillPlan(context.TODO(), nodes, 5, 1000, 0)
-	assert.EqualError(t, err, "not enough resource: not enough nodes that can fill up to 5 instances, require 1 nodes")
+	_, err = FillPlan(context.Background(), nodes, 5, 1000, 0)
+	assert.Contains(t, err.Error(), "not enough nodes that can fill up to 5 instances, require 1 nodes")
 }
diff --git a/strategy/global_test.go b/strategy/global_test.go
index c7379a030..e54cf9970 100644
--- a/strategy/global_test.go
+++ b/strategy/global_test.go
@@ -30,7 +30,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 1,
 	}
 	arg := []Info{n1, n2, n3}
-	r, err := GlobalPlan(context.TODO(), arg, 3, 100, 0)
+	r, err := GlobalPlan(context.Background(), arg, 3, 100, 0)
 	assert.NoError(t, err)
 	assert.Equal(t, r, map[string]int{"n1": 1, "n2": 2})
 
@@ -54,7 +54,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 1,
 	}
 	arg = []Info{n1, n2, n3}
-	r, err = GlobalPlan(context.TODO(), arg, 3, 100, 0)
+	r, err = GlobalPlan(context.Background(), arg, 3, 100, 0)
 	assert.Equal(t, r, map[string]int{"n1": 2, "n2": 1})
 
 	// insufficient total
@@ -77,7 +77,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 1,
 	}
 	arg = []Info{n1, n2, n3}
-	r, err = GlobalPlan(context.TODO(), arg, 100, 6, 0)
+	r, err = GlobalPlan(context.Background(), arg, 100, 6, 0)
 	assert.ErrorIs(t, err, types.ErrInsufficientResource)
 
 	// fake total
@@ -100,7 +100,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 1,
 	}
 	arg = []Info{n1, n2, n3}
-	r, err = GlobalPlan(context.TODO(), arg, 10, 100, 0)
+	r, err = GlobalPlan(context.Background(), arg, 10, 100, 0)
 	assert.ErrorIs(t, err, types.ErrInsufficientResource)
 
 	// small rate
@@ -123,7 +123,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 1e10,
 	}
 	arg = []Info{n1, n2, n3}
-	r, err = GlobalPlan(context.TODO(), arg, 10, 100, 0)
+	r, err = GlobalPlan(context.Background(), arg, 10, 100, 0)
 	assert.NoError(t, err)
 	assert.Equal(t, r, map[string]int{"n2": 10})
 
@@ -141,7 +141,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 100,
 	}
 	arg = []Info{n2, n1}
-	r, err = GlobalPlan(context.TODO(), arg, 2, 100, 0)
+	r, err = GlobalPlan(context.Background(), arg, 2, 100, 0)
 	assert.NoError(t, err)
 	assert.Equal(t, r, map[string]int{"n2": 2})
 
@@ -153,7 +153,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 100,
 	}
 
-	r, err = GlobalPlan(context.TODO(), []Info{n1}, 1, 100, 0)
+	r, err = GlobalPlan(context.Background(), []Info{n1}, 1, 100, 0)
 	assert.NoError(t, err)
 	assert.Equal(t, r["n1"], 1)
 
@@ -165,7 +165,7 @@ func TestGlobalPlan1(t *testing.T) {
 		Capacity: 100,
 		Count:    21,
 	}
-	r, err = GlobalPlan(context.TODO(), []Info{n1}, 10, 100, 0)
+	r, err = GlobalPlan(context.Background(), []Info{n1}, 10, 100, 0)
 	assert.NoError(t, err)
 	assert.Equal(t, r["n1"], 10)
 }
@@ -194,7 +194,7 @@ func TestGlobalIssue455(t *testing.T) {
 			Count:    6,
 		},
 	}
-	deployMap, err := GlobalPlan(context.TODO(), infos, 1, 19308043, 1)
+	deployMap, err := GlobalPlan(context.Background(), infos, 1, 19308043, 1)
 	assert.NoError(t, err)
 	assert.EqualValues(t, 1, deployMap["spp-qa-vm-node-1"])
 }
diff --git a/strategy/strategy_test.go b/strategy/strategy_test.go
index 1ac18d79d..90e3460d1 100644
--- a/strategy/strategy_test.go
+++ b/strategy/strategy_test.go
@@ -33,7 +33,7 @@ func deployedNodes() []Info {
 }
 
 func TestDeploy(t *testing.T) {
-	ctx := context.TODO()
+	ctx := context.Background()
 
 	// invaild strategy
 	_, err := Deploy(ctx, "invalid", -1, 3, nil, 2)
diff --git a/utils/utils_test.go b/utils/utils_test.go
index e961fd644..0ed53734a 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -131,15 +131,15 @@ func TestMetaInLabel(t *testing.T) {
 	meta := &types.LabelMeta{
 		Publish: []string{"1", "2"},
 	}
-	r := EncodeMetaInLabel(context.TODO(), meta)
+	r := EncodeMetaInLabel(context.Background(), meta)
 	assert.NotEmpty(t, r)
 
 	labels := map[string]string{
 		cluster.LabelMeta: "{\"Publish\":[\"5001\"],\"HealthCheck\":{\"TCPPorts\":[\"5001\"],\"HTTPPort\":\"\",\"HTTPURL\":\"\",\"HTTPCode\":0}}",
 	}
-	meta2 := DecodeMetaInLabel(context.TODO(), labels)
+	meta2 := DecodeMetaInLabel(context.Background(), labels)
 	assert.Equal(t, meta2.Publish[0], "5001")
 
-	meta3 := DecodeMetaInLabel(context.TODO(), map[string]string{cluster.LabelMeta: ""})
+	meta3 := DecodeMetaInLabel(context.Background(), map[string]string{cluster.LabelMeta: ""})
 	assert.Nil(t, meta3.HealthCheck)
 }
 
@@ -195,9 +195,9 @@ func TestMergeHookOutputs(t *testing.T) {
 }
 
 func TestEnsureReaderClosed(t *testing.T) {
-	EnsureReaderClosed(context.TODO(), nil)
+	EnsureReaderClosed(context.Background(), nil)
 	s := io.NopCloser(bytes.NewBuffer([]byte{10, 10, 10}))
-	EnsureReaderClosed(context.TODO(), s)
+	EnsureReaderClosed(context.Background(), s)
 }
 
 func TestRange(t *testing.T) {
diff --git a/wal/hydro_test.go b/wal/hydro_test.go
index f7deb3cfb..609e1ba72 100644
--- a/wal/hydro_test.go
+++ b/wal/hydro_test.go
@@ -73,7 +73,7 @@ func TestRecoverFailedAsNoSuchHandler(t *testing.T) {
 
 	hydro.Del(eventype)
 
-	hydro.Recover(context.TODO())
+	hydro.Recover(context.Background())
 	assert.True(t, encoded)
 	assert.False(t, decoded)
 	assert.False(t, checked)
@@ -97,7 +97,7 @@ func TestRecoverFailedAsCheckError(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, commit)
 
-	hydro.Recover(context.TODO())
+	hydro.Recover(context.Background())
 	assert.True(t, encoded)
 	assert.True(t, decoded)
 	assert.True(t, checked)
@@ -144,7 +144,7 @@ func TestRecoverFailedAsDecodeLogError(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, commit)
 
-	hydro.Recover(context.TODO())
+	hydro.Recover(context.Background())
 	assert.True(t, encoded)
 	assert.True(t, decoded)
 	assert.False(t, checked)
@@ -170,7 +170,7 @@ func TestHydroRecoverDiscardNoNeedEvent(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, commit)
 
-	hydro.Recover(context.TODO())
+	hydro.Recover(context.Background())
 	assert.True(t, encoded)
 	assert.True(t, decoded)
 	assert.True(t, checked)
@@ -190,7 +190,7 @@ func TestHydroRecover(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, commit)
 
-	hydro.Recover(context.TODO())
+	hydro.Recover(context.Background())
 	assert.True(t, encoded)
 	assert.True(t, decoded)
 	assert.True(t, checked)
@@ -234,7 +234,7 @@ func TestHydroRecoverWithRealLithium(t *testing.T) {
 	hydro.Log(handler.event, struct{}{})
 	hydro.Log(handler.event, struct{}{})
 
-	hydro.Recover(context.TODO())
+	hydro.Recover(context.Background())
 
 	ch, _ := hydro.store.Scan([]byte(eventPrefix))
 	select {
diff --git a/wal/wal_test.go b/wal/wal_test.go
index 5ee49ee9d..0127a78e6 100644
--- a/wal/wal_test.go
+++ b/wal/wal_test.go
@@ -62,7 +62,7 @@ func TestRecover(t *testing.T) {
 
 	wal.Log(eventype, struct{}{})
 
-	wal.Recover(context.TODO())
+	wal.Recover(context.Background())
 	assert.True(t, checked)
 	assert.True(t, handled)
 	assert.True(t, encoded)