
Commit 7d6101b

Merge branch 'refactor' into 'master'
use sort.Slice refactor

See merge request !74
zhangye committed Mar 27, 2017
2 parents 58ae883 + b9a3047 commit 7d6101b
Showing 4 changed files with 34 additions and 51 deletions.
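
The whole change is one mechanical swap applied across three Go files: each single-purpose sort.Interface type (ByNCon, ByCoreNum, ByMemCap) is deleted and the call sites pass an inline comparison to sort.Slice, which the standard library added in Go 1.8 for exactly this case. A minimal standalone sketch of the before/after pattern (the NodeInfo here is illustrative, not the repository's struct):

package main

import (
	"fmt"
	"sort"
)

// Illustrative only; the real NodeInfo structs live in
// scheduler/complex/cpu.go and utils/alloc_plan.go.
type NodeInfo struct {
	Name string
	NCon int
}

// The pattern this commit removes: a named type whose only job is to
// implement sort.Interface (Len/Swap/Less) for one field.
type ByNCon []NodeInfo

func (a ByNCon) Len() int           { return len(a) }
func (a ByNCon) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByNCon) Less(i, j int) bool { return a[i].NCon < a[j].NCon }

func main() {
	nodes := []NodeInfo{{"n1", 3}, {"n2", 1}, {"n3", 2}}

	// Before: sort.Sort(ByNCon(nodes))
	// After (Go 1.8+): the comparison goes inline, no extra type needed.
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].NCon < nodes[j].NCon })

	fmt.Println(nodes) // [{n2 1} {n3 2} {n1 3}]
}

sort.Slice takes any slice plus a less(i, j) closure, so the Len/Swap boilerplate disappears; like sort.Sort, it is not guaranteed to be stable.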
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-0.7.26
+0.7.26a
38 changes: 17 additions & 21 deletions scheduler/complex/cpu.go
@@ -16,16 +16,10 @@ type NodeInfo struct {
     NCon int
 }
 
-type ByNCon []NodeInfo
-
-func (a ByNCon) Len() int { return len(a) }
-func (a ByNCon) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByNCon) Less(i, j int) bool { return a[i].NCon < a[j].NCon }
-func (a ByNCon) volumn() int {
+func volumn(nodeInfo []NodeInfo) int {
     val := 0
-    len := a.Len()
-    for i := 0; i < len; i++ {
-        val += a[i].NCon
+    for _, v := range nodeInfo {
+        val += v.NCon
     }
     return val
 }
@@ -197,15 +191,15 @@ func averagePlan(cpu float64, nodes map[string]types.CPUMap, need, maxShareCore,
     var host *host
     var plan []types.CPUMap
     var n int
-    var nodeinfo ByNCon
-    var nodename string
+    var nodeInfo []NodeInfo
+    var nodeName string
 
     if cpu < 0.01 {
         resultLength := 0
     r:
         for {
-            for nodename, _ := range nodes {
-                result[nodename] = append(result[nodename], nil)
+            for nodeName, _ := range nodes {
+                result[nodeName] = append(result[nodeName], nil)
                 resultLength++
             }
             if resultLength == need {
@@ -219,32 +213,34 @@
         host = newHost(cpuInfo, coreShare)
         plan = host.getContainerCores(cpu, maxShareCore)
         n = len(plan) // number of containers this node can hold
-        nodeinfo = append(nodeinfo, NodeInfo{node, n})
+        nodeInfo = append(nodeInfo, NodeInfo{node, n})
         nodecontainer[node] = plan
     }
 
-    if nodeinfo.volumn() < need {
+    if volumn(nodeInfo) < need {
         return nil
     }
 
     // sort
-    sort.Sort(nodeinfo)
+    sort.Slice(nodeInfo, func(i, j int) bool {
+        return nodeInfo[i].NCon < nodeInfo[j].NCon
+    })
 
     // decide the allocation plan
-    allocplan := allocPlan(nodeinfo, need)
+    allocplan := allocPlan(nodeInfo, need)
     for node, ncon := range allocplan {
         if ncon > 0 {
-            nodename = nodeinfo[node].Node
-            result[nodename] = nodecontainer[nodename][:ncon]
+            nodeName = nodeInfo[node].Node
+            result[nodeName] = nodecontainer[nodeName][:ncon]
         }
     }
 
     return result
 }
 
-func allocPlan(info ByNCon, need int) map[int]int {
+func allocPlan(info []NodeInfo, need int) map[int]int {
     result := make(map[int]int)
-    NNode := info.Len()
+    NNode := len(info)
 
     var nodeToUse, more int
     for i := 0; i < NNode; i++ {
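
For cpu.go specifically, the refactor also turns the volumn helper from a method on the deleted ByNCon type into a plain function over []NodeInfo. A self-contained sketch of how the refactored pieces fit together (field names follow the diff above; the host names and the surrounding averagePlan logic are invented/omitted for illustration):

package main

import (
	"fmt"
	"sort"
)

// Trimmed-down NodeInfo with just the fields this hunk touches.
type NodeInfo struct {
	Node string
	NCon int
}

// volumn, as refactored: total container capacity across all nodes.
func volumn(nodeInfo []NodeInfo) int {
	val := 0
	for _, v := range nodeInfo {
		val += v.NCon
	}
	return val
}

func main() {
	nodes := []NodeInfo{{"host-a", 4}, {"host-b", 2}, {"host-c", 6}}
	need := 5

	if volumn(nodes) < need {
		fmt.Println("not enough capacity")
		return
	}

	// Same ordering the old ByNCon.Less produced, now expressed inline.
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].NCon < nodes[j].NCon })
	fmt.Println(nodes) // [{host-b 2} {host-a 4} {host-c 6}]
}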
29 changes: 8 additions & 21 deletions utils/alloc_plan.go
@@ -14,23 +14,11 @@ type NodeInfo struct {
     Memory int64
 }
 
-type ByCoreNum []NodeInfo
-
-func (a ByCoreNum) Len() int { return len(a) }
-func (a ByCoreNum) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByCoreNum) Less(i, j int) bool { return a[i].CorePer < a[j].CorePer }
-
-type ByMemCap []NodeInfo
-
-func (a ByMemCap) Len() int { return len(a) }
-func (a ByMemCap) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByMemCap) Less(i, j int) bool { return a[i].Memory < a[j].Memory }
-
-func AllocContainerPlan(nodeInfo ByCoreNum, quota int, memory int64, count int) (map[string]int, error) {
+func AllocContainerPlan(nodeInfo []NodeInfo, quota int, memory int64, count int) (map[string]int, error) {
     log.Debugf("[AllocContainerPlan]: nodeInfo: %v, quota: %d, memory: %d, count: %d", nodeInfo, quota, memory, count)
 
     result := make(map[string]int)
-    N := nodeInfo.Len()
+    N := len(nodeInfo)
     firstNodeWithEnoughCPU := -1
 
     for i := 0; i < N; i++ {
@@ -46,7 +34,7 @@ func AllocContainerPlan(nodeInfo ByCoreNum, quota int, memory int64, count int)
     log.Debugf("[AllocContainerPlan] the %d th node has enough cpu quota.", firstNodeWithEnoughCPU)
 
     // check whether there is enough memory to satisfy the request
-    nodeInfoList := ByMemCap{}
+    nodeInfoList := []NodeInfo{}
     volTotal := 0
     volEachNode := []int{} // for sorting
     for i := firstNodeWithEnoughCPU; i < N; i++ {
@@ -58,8 +46,7 @@ func AllocContainerPlan(nodeInfo ByCoreNum, quota int, memory int64, count int)
         }
     }
     if volTotal < count {
-        log.Errorf("[AllocContainerPlan] Cannot alloc a plan, volume %d, count %d", volTotal, count)
-        return result, fmt.Errorf("[AllocContainerPlan] Cannot alloc a plan, not enough memory.")
+        return result, fmt.Errorf("[AllocContainerPlan] Cannot alloc a plan, not enough memory, volume %d, count %d", volTotal, count)
     }
     log.Debugf("[AllocContainerPlan] volumn of each node: %v", volEachNode)

@@ -71,7 +58,7 @@ func AllocContainerPlan(nodeInfo ByCoreNum, quota int, memory int64, count int)
         return result, err
     }
 
-    sort.Sort(nodeInfoList)
+    sort.Slice(nodeInfoList, func(i, j int) bool { return nodeInfoList[i].Memory < nodeInfoList[j].Memory })
     log.Debugf("[AllocContainerPlan] sorted nodeInfo: %v, ", nodeInfoList)
     for i, num := range plan {
         key := nodeInfoList[i].Name
@@ -134,11 +121,11 @@ func allocAlgorithm(info []int, need int) (map[int]int, error) {
     return result, nil
 }
 
-func GetNodesInfo(cpumemmap map[string]types.CPUAndMem) ByCoreNum {
-    result := ByCoreNum{}
+func GetNodesInfo(cpumemmap map[string]types.CPUAndMem) []NodeInfo {
+    result := []NodeInfo{}
     for node, cpuandmem := range cpumemmap {
         result = append(result, NodeInfo{node, len(cpuandmem.CpuMap) * CpuPeriodBase, cpuandmem.MemCap})
     }
-    sort.Sort(result)
+    sort.Slice(result, func(i, j int) bool { return result[i].Memory < result[j].Memory })
     return result
 }
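
One behavioral note on the alloc_plan.go change: sort.Sort and sort.Slice are both unstable sorts, so swapping one for the other preserves the existing semantics when two nodes report the same free memory; sort.SliceStable is the drop-in alternative if input order should ever break ties. A standalone sketch (node names borrowed from the tests below, memory values invented):

package main

import (
	"fmt"
	"sort"
)

// Stand-in for the NodeInfo in utils/alloc_plan.go, keeping only the
// field this sort looks at.
type NodeInfo struct {
	Name   string
	Memory int64
}

func main() {
	nodes := []NodeInfo{
		{"C2-docker-14", 8 << 30},
		{"C2-docker-15", 4 << 30},
		{"C2-docker-16", 4 << 30},
	}

	// Equivalent to the removed ByMemCap ordering: ascending free memory.
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].Memory < nodes[j].Memory })

	// If equal-memory nodes must keep their input order, use SliceStable.
	sort.SliceStable(nodes, func(i, j int) bool { return nodes[i].Memory < nodes[j].Memory })

	fmt.Println(nodes)
}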
16 changes: 8 additions & 8 deletions utils/utils_test.go
@@ -45,8 +45,8 @@ func TestGetGitRepoName(t *testing.T) {
     assert.Equal(t, r1, "core")
 }
 
-func updateByCoreNum(input ByCoreNum, plan map[string]int, memory int64) ByCoreNum {
-    result := ByCoreNum{}
+func updateNodeInfo(input []NodeInfo, plan map[string]int, memory int64) []NodeInfo {
+    result := []NodeInfo{}
     for _, node := range input {
         memoryChange := int64(plan[node.Name]) * memory
         node.Memory -= memoryChange
@@ -57,7 +57,7 @@ func updateByCoreNum(input ByCoreNum, plan map[string]int, memory int64) ByCoreNum {
 
 func TestContinuousAddingContainer(t *testing.T) {
     // test the randomness of the allocation algorithm
-    testPodInfo := ByCoreNum{}
+    testPodInfo := []NodeInfo{}
     node1 := NodeInfo{"n1", 20000, 3 * 1024 * 1024 * 1024}
     node2 := NodeInfo{"n2", 30000, 3 * 1024 * 1024 * 1024}
     // node3 := NodeInfo{"n3", 10000}
@@ -67,15 +67,15 @@ func TestContinuousAddingContainer(t *testing.T) {
 
     for i := 0; i < 7; i++ {
         res, err := AllocContainerPlan(testPodInfo, 10000, 512*1024*1024, 1)
-        testPodInfo = updateByCoreNum(testPodInfo, res, 512*1024*1024)
+        testPodInfo = updateNodeInfo(testPodInfo, res, 512*1024*1024)
         // fmt.Println(testPodInfo)
         fmt.Println(res)
         assert.NoError(t, err)
     }
 }
 
 func TestAlgorithm(t *testing.T) {
-    testPodInfo := ByCoreNum{}
+    testPodInfo := []NodeInfo{}
     node1 := NodeInfo{"n1", 30000, 5 * 512 * 1024 * 1024}
     node2 := NodeInfo{"n2", 30000, 5 * 512 * 1024 * 1024}
     node3 := NodeInfo{"n3", 30000, 5 * 512 * 1024 * 1024}
@@ -90,7 +90,7 @@ func TestAlgorithm(t *testing.T) {
 }
 
 func TestAllocContainerPlan(t *testing.T) {
-    testPodInfo := ByCoreNum{}
+    testPodInfo := []NodeInfo{}
     node1 := NodeInfo{"n1", 30000, 512 * 1024 * 1024}
     node2 := NodeInfo{"n2", 30000, 3 * 512 * 1024 * 1024}
     node3 := NodeInfo{"n3", 30000, 5 * 512 * 1024 * 1024}
@@ -121,7 +121,7 @@ func TestAllocContainerPlan(t *testing.T) {
 }
 
 func TestDebugPlan(t *testing.T) {
-    testPodInfo := ByCoreNum{}
+    testPodInfo := []NodeInfo{}
     node0 := NodeInfo{"C2-docker-14", 1600000, 13732667392}
     node1 := NodeInfo{"C2-docker-15", 1600000, 4303872000}
     node2 := NodeInfo{"C2-docker-16", 1600000, 1317527552}
@@ -142,7 +142,7 @@ func TestDebugPlan(t *testing.T) {
 }
 
 func TestAllocAlgorithm(t *testing.T) {
-    testPodInfo := ByCoreNum{}
+    testPodInfo := []NodeInfo{}
     node0 := NodeInfo{"C2-docker-15", 1600000, 0}
     node1 := NodeInfo{"C2-docker-16", 1600000, 2357198848 * 2}
     node2 := NodeInfo{"C2-docker-17", 1600000, 2357198848 * 2}
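
The test diff is the same ByCoreNum → []NodeInfo switch plus the helper rename: updateNodeInfo charges each node the memory an allocation plan would consume before the next round. The hunk above cuts off before the helper's final lines, so the tail of this standalone sketch (the append and return) is assumed rather than copied from the repository:

package main

import "fmt"

// Stand-in for the test NodeInfo: {Name, CorePer, Memory}.
type NodeInfo struct {
	Name    string
	CorePer int
	Memory  int64
}

// updateNodeInfo, as the renamed test helper appears to work: subtract
// the memory used by each node's share of the plan.
func updateNodeInfo(input []NodeInfo, plan map[string]int, memory int64) []NodeInfo {
	result := []NodeInfo{}
	for _, node := range input {
		memoryChange := int64(plan[node.Name]) * memory
		node.Memory -= memoryChange
		result = append(result, node) // assumed: not shown in the hunk
	}
	return result
}

func main() {
	nodes := []NodeInfo{
		{"n1", 20000, 3 * 1024 * 1024 * 1024},
		{"n2", 30000, 3 * 1024 * 1024 * 1024},
	}
	plan := map[string]int{"n1": 2} // two 512 MiB containers land on n1
	fmt.Println(updateNodeInfo(nodes, plan, 512*1024*1024))
}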
