diff --git a/cluster/calcium/lock.go b/cluster/calcium/lock.go
index 522322ff6..0b9934b9e 100644
--- a/cluster/calcium/lock.go
+++ b/cluster/calcium/lock.go
@@ -38,7 +38,7 @@ func (c *Calcium) doUnlock(ctx context.Context, lock lock.DistributedLock, msg s
 	return errors.WithStack(lock.Unlock(ctx))
 }
 
-func (c *Calcium) doUnlockAll(ctx context.Context, locks map[string]lock.DistributedLock, order []string) {
+func (c *Calcium) doUnlockAll(ctx context.Context, locks map[string]lock.DistributedLock, order ...string) {
 	// unlock in the reverse order
 	if len(order) != len(locks) {
 		log.Warn(ctx, "[doUnlockAll] order length not match lock map")
@@ -88,7 +88,7 @@ func (c *Calcium) withWorkloadsLocked(ctx context.Context, ids []string, f func(
 	defer log.Debugf(ctx, "[withWorkloadsLocked] Workloads %+v unlocked", ids)
 	defer func() {
 		utils.Reverse(ids)
-		c.doUnlockAll(utils.InheritTracingInfo(ctx, context.Background()), locks, ids)
+		c.doUnlockAll(utils.InheritTracingInfo(ctx, context.Background()), locks, ids...)
 	}()
 	cs, err := c.GetWorkloads(ctx, ids)
 	if err != nil {
@@ -116,7 +116,7 @@ func (c *Calcium) withNodesLocked(ctx context.Context, nf types.NodeFilter, f fu
 	defer log.Debugf(ctx, "[withNodesLocked] Nodes %+v unlocked", nf)
 	defer func() {
 		utils.Reverse(nodenames)
-		c.doUnlockAll(utils.InheritTracingInfo(ctx, context.Background()), locks, nodenames)
+		c.doUnlockAll(utils.InheritTracingInfo(ctx, context.Background()), locks, nodenames...)
 	}()
 	ns, err := c.filterNodes(ctx, nf)
 
diff --git a/cluster/calcium/realloc_test.go b/cluster/calcium/realloc_test.go
index 7bd940af7..62e22cac3 100644
--- a/cluster/calcium/realloc_test.go
+++ b/cluster/calcium/realloc_test.go
@@ -97,27 +97,29 @@ func TestRealloc(t *testing.T) {
 		}
 	}
 
-	store.On("GetWorkloads", mock.Anything, []string{"c1"}).Return(newC1, nil)
+	store.On("GetWorkload", mock.Anything, "c1").Return(newC1(context.TODO(), nil)[0], nil)
+
+	// failed by GetNode
+	store.On("GetNode", mock.Anything, "node1").Return(nil, types.ErrNoETCD).Once()
+	err := c.ReallocResource(ctx, newReallocOptions("c1", 0.1, 2*int64(units.GiB), nil, types.TriKeep, types.TriKeep))
+	assert.EqualError(t, err, "ETCD must be set")
+	store.AssertExpectations(t)
+	store.On("GetNode", mock.Anything, "node1").Return(node1, nil)
+
 	// failed by lock
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(nil, types.ErrNoETCD).Once()
-	err := c.ReallocResource(ctx, newReallocOptions("c1", -1, 2*int64(units.GiB), nil, types.TriKeep, types.TriKeep))
+	err = c.ReallocResource(ctx, newReallocOptions("c1", -1, 2*int64(units.GiB), nil, types.TriKeep, types.TriKeep))
 	assert.EqualError(t, err, "ETCD must be set")
 	store.AssertExpectations(t)
 	store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil)
 
 	// failed by newCPU < 0
+	store.On("GetWorkloads", mock.Anything, []string{"c1"}).Return(newC1, nil)
 	err = c.ReallocResource(ctx, newReallocOptions("c1", -1, 2*int64(units.GiB), nil, types.TriKeep, types.TriKeep))
 	assert.EqualError(t, err, "limit or request less than 0: bad `CPU` value")
 	store.AssertExpectations(t)
 
-	// failed by GetNode
-	store.On("GetNode", mock.Anything, "node1").Return(nil, types.ErrNoETCD).Once()
-	err = c.ReallocResource(ctx, newReallocOptions("c1", 0.1, 2*int64(units.GiB), nil, types.TriKeep, types.TriKeep))
-	assert.EqualError(t, err, "ETCD must be set")
-	store.AssertExpectations(t)
-
 	// failed by no enough mem
-	store.On("GetNode", mock.Anything, "node1").Return(node1, nil)
 	simpleMockScheduler := &schedulermocks.Scheduler{}
 	scheduler.InitSchedulerV1(simpleMockScheduler)
 	c.scheduler = simpleMockScheduler
@@ -165,6 +167,7 @@ func TestRealloc(t *testing.T) {
 		},
 		Engine: engine,
 	}
+	store.On("GetWorkload", mock.Anything, "c2").Return(newC2(nil, nil)[0], nil)
 	store.On("GetWorkloads", mock.Anything, []string{"c2"}).Return(newC2, nil)
 	err = c.ReallocResource(ctx, newReallocOptions("c2", 0.1, 2*int64(units.MiB), nil, types.TriKeep, types.TriKeep))
 	assert.EqualError(t, err, "workload ID must be length of 64")
@@ -251,6 +254,7 @@ func TestRealloc(t *testing.T) {
 	simpleMockScheduler.On("ReselectCPUNodes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(scheduleInfos[0], nodeCPUPlans, 2, nil)
 	simpleMockScheduler.On("ReselectVolumeNodes", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(scheduleInfos[0], nodeVolumePlans, 2, nil).Once()
 	store.On("GetNode", mock.Anything, "node2").Return(node2, nil)
+	store.On("GetWorkload", mock.Anything, "c3").Return(c3, nil)
 	store.On("GetWorkloads", mock.Anything, []string{"c3"}).Return([]*types.Workload{c3}, nil)
 	store.On("UpdateWorkload", mock.Anything, mock.Anything).Return(types.ErrBadWorkloadID).Times(1)
 	err = c.ReallocResource(ctx, newReallocOptions("c3", 0.1, 2*int64(units.MiB), types.MustToVolumeBindings([]string{"AUTO:/data0:rw:-50"}), types.TriKeep, types.TriKeep))
@@ -341,6 +345,7 @@ func TestReallocBindCpu(t *testing.T) {
 	}
 
 	store.On("GetNode", mock.Anything, "node3").Return(node3, nil)
+	store.On("GetWorkload", mock.Anything, "c5").Return(c5, nil)
 	store.On("GetWorkloads", mock.Anything, []string{"c5"}).Return([]*types.Workload{c5}, nil)
 	engine.On("VirtualizationUpdateResource", mock.Anything, mock.Anything, mock.Anything).Return(nil)
 	store.On("UpdateWorkload", mock.Anything, mock.Anything).Return(nil)
@@ -358,6 +363,7 @@ func TestReallocBindCpu(t *testing.T) {
 	assert.Equal(t, 0, len(c5.ResourceMeta.CPU))
 	store.AssertExpectations(t)
 
+	store.On("GetWorkload", mock.Anything, "c6").Return(c6, nil)
 	store.On("GetWorkloads", mock.Anything, []string{"c6"}).Return([]*types.Workload{c6}, nil)
 	err = c.ReallocResource(ctx, newReallocOptions("c6", 0.1, 2*int64(units.MiB), nil, types.TriTrue, types.TriKeep))
 	assert.NoError(t, err)
diff --git a/scheduler/complex/potassium.go b/scheduler/complex/potassium.go
index 78d494fd4..8eb88193d 100644
--- a/scheduler/complex/potassium.go
+++ b/scheduler/complex/potassium.go
@@ -124,6 +124,7 @@ func (m *Potassium) SelectCPUNodes(ctx context.Context, scheduleInfos []resource
 
 // ReselectCPUNodes used for realloc one container with cpu affinity
 func (m *Potassium) ReselectCPUNodes(ctx context.Context, scheduleInfo resourcetypes.ScheduleInfo, CPU types.CPUMap, quota float64, memory int64) (resourcetypes.ScheduleInfo, map[string][]types.CPUMap, int, error) {
+	log.Infof(ctx, "[ReselectCPUNodes] scheduleInfo %v, need cpu %f, need memory %d, existing %v", scheduleInfo, quota, memory, CPU)
 	var affinityPlan types.CPUMap
 	// remaining quota that's impossible to achieve affinity
 	if scheduleInfo, quota, affinityPlan = cpuReallocPlan(scheduleInfo, quota, CPU, int64(m.sharebase)); quota == 0 {
@@ -325,7 +326,7 @@ func (m *Potassium) SelectVolumeNodes(ctx context.Context, scheduleInfos []resou
 
 // ReselectVolumeNodes is used for realloc only
 func (m *Potassium) ReselectVolumeNodes(ctx context.Context, scheduleInfo resourcetypes.ScheduleInfo, existing types.VolumePlan, vbsReq types.VolumeBindings) (resourcetypes.ScheduleInfo, map[string][]types.VolumePlan, int, error) {
-
+	log.Infof(ctx, "[ReselectVolumeNodes] scheduleInfo %v, need volume: %v, existing %v", scheduleInfo, vbsReq.ToStringSlice(true, true), existing.ToLiteral())
 	affinityPlan := types.VolumePlan{}
 	needReschedule := types.VolumeBindings{}
 	norm, mono, unlim := distinguishVolumeBindings(vbsReq)
diff --git a/utils/utils.go b/utils/utils.go
index 1b953ff67..71081a45d 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -297,6 +297,7 @@ func Reverse(s interface{}) {
 }
 
 // Unique return a index, where s[:index] is a unique slice
+// Unique requires a sorted slice
 func Unique(s interface{}, getVal func(int) string) (j int) {
 	n := reflect.ValueOf(s).Len()
 	swap := reflect.Swapper(s)
diff --git a/utils/utils_test.go b/utils/utils_test.go
index e149b39a3..c25c92f73 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"reflect"
 	"strconv"
 	"testing"
 
@@ -232,3 +233,30 @@ func TestRange(t *testing.T) {
 		assert.Equal(t, i, res[i])
 	}
 }
+
+func TestReverse(t *testing.T) {
+	s1 := []string{"a", "b", "c"}
+	Reverse(s1)
+	assert.True(t, reflect.DeepEqual(s1, []string{"c", "b", "a"}))
+
+	s2 := []string{}
+	Reverse(s2)
+
+	s3 := []int{1, 2, 3, 4}
+	Reverse(s3)
+	assert.True(t, reflect.DeepEqual(s3, []int{4, 3, 2, 1}))
+}
+
+func TestUnique(t *testing.T) {
+	s1 := []int64{1, 2, 3}
+	s1 = s1[:Unique(s1, func(i int) string { return strconv.Itoa(int(s1[i])) })]
+	assert.True(t, reflect.DeepEqual(s1, []int64{1, 2, 3}))
+
+	s2 := []string{"a", "a", "a", "b", "b", "c"}
+	s2 = s2[:Unique(s2, func(i int) string { return s2[i] })]
+	assert.True(t, reflect.DeepEqual(s2, []string{"a", "b", "c"}))
+
+	s3 := []string{"", "1", "1", "1", "1"}
+	s3 = s3[:Unique(s3, func(i int) string { return s3[i] })]
+	assert.True(t, reflect.DeepEqual(s3, []string{"", "1"}))
+}