unittests: fix resource bugs, add more tests
CMGS committed Nov 25, 2020
1 parent beda46b commit 173abec
Showing 5 changed files with 85 additions and 19 deletions.
34 changes: 31 additions & 3 deletions resources/cpumem/cpumem_test.go
@@ -73,6 +73,26 @@ func TestType(t *testing.T) {
    assert.True(t, req.Type()&types.ResourceCPUBind > 0)
}

func TestRate(t *testing.T) {
    req, err := MakeRequest(types.ResourceOptions{
        MemoryRequest: 0,
        MemoryLimit: 1,
    })
    assert.Nil(t, err)
    node := types.Node{
        InitCPU: types.CPUMap{"1": 100, "2": 100},
        InitMemCap: 100,
    }
    assert.Equal(t, req.Rate(node), 0.01)
    req, err = MakeRequest(types.ResourceOptions{
        CPUQuotaRequest: 0,
        CPUQuotaLimit: 2,
        CPUBind: true,
    })
    assert.Nil(t, err)
    assert.Equal(t, req.Rate(node), 1.0)
}

func TestRequestCpuNode(t *testing.T) {
    run(t, newRequestCPUNodeTest())
}
@@ -104,15 +124,20 @@ type nodeSchdulerTest interface {
}

func run(t *testing.T, test nodeSchdulerTest) {
    resourceRequest, err := MakeRequest(test.getRequestOptions())
    assert.NoError(t, err)
    _, _, err = resourceRequest.MakeScheduler()([]types.NodeInfo{})
    assert.Error(t, err)

    s := test.getScheduler()
    prevSche, _ := scheduler.GetSchedulerV1()
    scheduler.InitSchedulerV1(s)
    defer func() {
        scheduler.InitSchedulerV1(prevSche)
    }()

    resourceRequest, err := MakeRequest(test.getRequestOptions())
    assert.Nil(t, err)
    resourceRequest, err = MakeRequest(test.getRequestOptions())
    assert.NoError(t, err)

    sche := resourceRequest.MakeScheduler()

@@ -121,6 +146,9 @@ func run(t *testing.T, test nodeSchdulerTest) {

    var node = test.getNode()

    assert.True(t, plans.Type()&(types.ResourceCPU|types.ResourceMemory) > 0)
    assert.NotNil(t, plans.Capacity())

    plans.ApplyChangesOnNode(node, 0)
    test.assertAfterChanges(t)

@@ -161,7 +189,7 @@ func newRequestCPUNodeTest() nodeSchdulerTest {
                CPUPlan: []types.CPUMap{{"0": 10000, "1": 10000}},
            },
        },
        cpuMap: map[string][]types.CPUMap{"TestNode": []types.CPUMap{{"0": 10000, "1": 10000}}},
        cpuMap: map[string][]types.CPUMap{"TestNode": {{"0": 10000, "1": 10000}}},
    }
}

2 changes: 1 addition & 1 deletion resources/storage/storage.go
@@ -60,7 +60,7 @@ func (s storageRequest) MakeScheduler() resourcetypes.SchedulerV2 {

// Rate .
func (s storageRequest) Rate(node types.Node) float64 {
    return float64(0) / float64(node.Volume.Total())
    return float64(s.request) / float64(node.InitStorageCap)
}

// ResourcePlans .
37 changes: 29 additions & 8 deletions resources/storage/storage_test.go
@@ -36,6 +36,24 @@ func TestMakeRequest(t *testing.T) {
        StorageLimit: 0,
    })
    assert.Nil(t, err)

    _, err = MakeRequest(types.ResourceOptions{
        StorageRequest: 2024,
        StorageLimit: 1024,
    })
    assert.Nil(t, err)
}

func TestRate(t *testing.T) {
    req, err := MakeRequest(types.ResourceOptions{
        StorageRequest: 1024,
        StorageLimit: 1024,
    })
    assert.Nil(t, err)
    node := types.Node{
        InitStorageCap: 1024,
    }
    assert.Equal(t, req.Rate(node), 1.0)
}

func TestStorage(t *testing.T) {
@@ -57,21 +75,22 @@ func TestStorage(t *testing.T) {
        "SelectStorageNodes", mock.Anything, mock.Anything,
    ).Return(nodeInfos, 1, nil)

    prevSche, _ := scheduler.GetSchedulerV1()
    scheduler.InitSchedulerV1(mockScheduler)
    defer func() {
        scheduler.InitSchedulerV1(prevSche)
    }()

    resourceRequest, err := MakeRequest(types.ResourceOptions{
        StorageRequest: 1024,
        StorageLimit: 1024,
    })
    assert.Nil(t, err)
    assert.NoError(t, err)
    _, _, err = resourceRequest.MakeScheduler()([]types.NodeInfo{})
    assert.Error(t, err)

    assert.True(t, resourceRequest.Type()&types.ResourceStorage > 0)
    prevSche, _ := scheduler.GetSchedulerV1()
    scheduler.InitSchedulerV1(mockScheduler)
    defer func() {
        scheduler.InitSchedulerV1(prevSche)
    }()

    sche := resourceRequest.MakeScheduler()

    plans, _, err := sche(nodeInfos)
    assert.Nil(t, err)

@@ -87,6 +106,8 @@

    assert.True(t, plans.Type()&types.ResourceStorage > 0)

    assert.NotNil(t, plans.Capacity())

    plans.ApplyChangesOnNode(&node, 0)
    assert.Less(t, node.StorageCap, storage)

6 changes: 5 additions & 1 deletion resources/volume/volume.go
@@ -99,7 +99,11 @@ func (v volumeRequest) MakeScheduler() resourcetypes.SchedulerV2 {

// Rate .
func (v volumeRequest) Rate(node types.Node) float64 {
    return float64(node.VolumeUsed) / float64(node.Volume.Total())
    var totalRequest int64
    for i := 0; i < v.requests; i++ {
        totalRequest += v.request[i].SizeInBytes
    }
    return float64(totalRequest) / float64(node.Volume.Total())
}

// ResourcePlans .
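Both Rate fixes compute the same kind of ratio: the amount being requested divided by the node's total capacity, with the volume request first summed across its bindings (the old storage Rate always returned 0 because its numerator was the literal 0, and the old volume Rate used the node's VolumeUsed rather than the request). A minimal standalone sketch of that arithmetic, using simplified stand-in types (volumeBinding, node, volumeTotal, storageRate, and volumeRate below are illustrative names, not the repository's actual API):

package main

import "fmt"

// volumeBinding and node are simplified, illustrative stand-ins for the
// repository's types.VolumeBinding and types.Node.
type volumeBinding struct{ SizeInBytes int64 }

type node struct {
    initStorageCap int64
    volumeCaps     map[string]int64 // capacity per volume, like types.VolumeMap
}

// volumeTotal sums per-volume capacities, mirroring node.Volume.Total().
func (n node) volumeTotal() int64 {
    var total int64
    for _, c := range n.volumeCaps {
        total += c
    }
    return total
}

// storageRate: requested bytes divided by the node's initial storage capacity.
func storageRate(requested int64, n node) float64 {
    return float64(requested) / float64(n.initStorageCap)
}

// volumeRate: requested binding sizes summed, then divided by the node's
// total volume capacity.
func volumeRate(bindings []volumeBinding, n node) float64 {
    var total int64
    for _, b := range bindings {
        total += b.SizeInBytes
    }
    return float64(total) / float64(n.volumeTotal())
}

func main() {
    n := node{initStorageCap: 1024, volumeCaps: map[string]int64{"1": 1024}}
    fmt.Println(storageRate(1024, n))                                // prints 1
    fmt.Println(volumeRate([]volumeBinding{{SizeInBytes: 1024}}, n)) // prints 1
}

These values mirror the new TestRate cases added below, where a 1024-byte request against a node with 1024 bytes of capacity yields a rate of 1.0.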
25 changes: 19 additions & 6 deletions resources/volume/volume_test.go
@@ -15,7 +15,7 @@ import (
func TestMakeRequest(t *testing.T) {
    _, err := MakeRequest(types.ResourceOptions{
        VolumeLimit: []*types.VolumeBinding{
            &types.VolumeBinding{
            {
                Source: "/data1",
                Destination: "/data1",
            },
@@ -25,17 +25,17 @@ func TestMakeRequest(t *testing.T) {

    _, err = MakeRequest(types.ResourceOptions{
        VolumeRequest: []*types.VolumeBinding{
            &types.VolumeBinding{
            {
                Source: "/data1",
                Destination: "/data1",
            },
        },
        VolumeLimit: []*types.VolumeBinding{
            &types.VolumeBinding{
            {
                Source: "/data1",
                Destination: "/data1",
            },
            &types.VolumeBinding{
            {
                Source: "/data2",
                Destination: "/data2",
            },
@@ -93,13 +93,13 @@ func TestStorage(t *testing.T) {

    resourceRequest, err := MakeRequest(types.ResourceOptions{
        VolumeRequest: []*types.VolumeBinding{
            &types.VolumeBinding{
            {
                Source: "/data1",
                Destination: "/data1",
            },
        },
        VolumeLimit: []*types.VolumeBinding{
            &types.VolumeBinding{
            {
                Source: "/data1",
                Destination: "/data1",
            },
@@ -127,6 +127,7 @@ func TestStorage(t *testing.T) {
        InitVolume: types.VolumeMap{"/data1": 512, "/data2": 512},
    }

    assert.NotNil(t, plans.Capacity())
    plans.ApplyChangesOnNode(&node, 0)
    assert.Less(t, node.Volume["/data1"], int64(512))

Expand All @@ -153,3 +154,15 @@ func TestStorage(t *testing.T) {
_, err = plans.Dispense(opts, r)
assert.Nil(t, err)
}

func TestRate(t *testing.T) {
req, err := MakeRequest(types.ResourceOptions{
VolumeRequest: types.VolumeBindings{&types.VolumeBinding{SizeInBytes: 1024}},
VolumeLimit: types.VolumeBindings{&types.VolumeBinding{SizeInBytes: 1024}},
})
assert.Nil(t, err)
node := types.Node{
Volume: types.VolumeMap{"1": 1024},
}
assert.Equal(t, req.Rate(node), 1.0)
}
