Skip to content

Commit

Permalink
chore: refactor error types and add units
Browse files — Browse the repository at this point in the history
add a detailed error

fix tests
  • Loading branch information
wrfly authored and CMGS committed Jul 30, 2018
1 parent d5573b5 commit db000bc
Show file tree
Hide file tree
Showing 25 changed files with 249 additions and 167 deletions.
3 changes: 1 addition & 2 deletions cluster/calcium/cluster.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package calcium

import (
"fmt"
"strings"

"github.com/projecteru2/core/cluster"
Expand Down Expand Up @@ -52,7 +51,7 @@ func New(config types.Config) (*Calcium, error) {
case cluster.GITHUB:
scm = github.New(config)
default:
return nil, fmt.Errorf("Unkonwn SCM type: %s", config.Git.SCMType)
return nil, types.NewDetailedErr(types.ErrBadSCMType, scmtype)
}

return &Calcium{store: store, config: config, scheduler: scheduler, network: titanium, source: scm}, nil
Expand Down
2 changes: 1 addition & 1 deletion cluster/calcium/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ import (

const (
restartAlways = "always"
minMemory = 4194304
minMemory = types.MByte * 4
root = "root"
maxPuller = 10
)
Expand Down
13 changes: 7 additions & 6 deletions cluster/calcium/create_container.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,23 +31,24 @@ func (c *Calcium) CreateContainer(ctx context.Context, opts *types.DeployOptions

// 4194304 Byte = 4 MB, docker 创建容器的内存最低标准
if opts.Memory < minMemory {
return nil, fmt.Errorf("Minimum memory limit allowed is 4MB, got %d", opts.Memory)
return nil, types.NewDetailedErr(types.ErrBadMemory,
fmt.Sprintf("Minimum memory limit allowed is 4MB, got %d", opts.Memory))
}
// Count 要大于0
if opts.Count <= 0 {
return nil, fmt.Errorf("Count must be positive, got %d", opts.Count)
return nil, types.NewDetailedErr(types.ErrBadCount, opts.Count)
}
// CPUQuota 也需要大于 0
if opts.CPUQuota <= 0 {
return nil, fmt.Errorf("CPUQuota must be positive, got %d", opts.Count)
return nil, types.NewDetailedErr(types.ErrBadCPU, opts.CPUQuota)
}

if pod.Favor == scheduler.MEMORY_PRIOR {
return c.createContainerWithMemoryPrior(ctx, opts)
} else if pod.Favor == scheduler.CPU_PRIOR {
return c.createContainerWithCPUPrior(ctx, opts)
}
return nil, fmt.Errorf("[CreateContainer] Favor not support: %v", pod.Favor)
return nil, types.NewDetailedErr(types.ErrBadFaver, pod.Favor)
}

func (c *Calcium) createContainerWithMemoryPrior(ctx context.Context, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error) {
Expand Down Expand Up @@ -121,7 +122,7 @@ func (c *Calcium) createContainerWithCPUPrior(ctx context.Context, opts *types.D
}

if len(nodesInfo) == 0 {
return ch, fmt.Errorf("Not enough resource to create container")
return ch, types.ErrInsufficientRes
}

go func() {
Expand Down Expand Up @@ -304,7 +305,7 @@ func (c *Calcium) makeContainerOptions(index int, quota types.CPUMap, opts *type
} else if favor == scheduler.MEMORY_PRIOR {
resource = makeMemoryPriorSetting(opts.Memory, opts.CPUQuota, opts.SoftLimit)
} else {
return nil, nil, nil, "", fmt.Errorf("favor not support %s", favor)
return nil, nil, nil, "", types.NewDetailedErr(types.ErrBadFaver, favor)
}
resource.Ulimits = ulimits

Expand Down
3 changes: 1 addition & 2 deletions cluster/calcium/create_container_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,7 @@ func TestClean(t *testing.T) {

// delete pod, which will fail because there are remaining nodes
err := mockc.store.RemovePod(context.Background(), podname)
assert.Error(t, err)
assert.Contains(t, err.Error(), "still has nodes")
assert.EqualError(t, types.IsDetailedErr(err), types.ErrPodHasNodes.Error())
}

func TestCreateContainerWithCPUPrior(t *testing.T) {
Expand Down
3 changes: 1 addition & 2 deletions cluster/calcium/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package calcium

import (
"context"
"fmt"

"github.com/projecteru2/core/types"
"github.com/projecteru2/core/utils"
Expand All @@ -20,7 +19,7 @@ func (c *Calcium) ListNetworks(ctx context.Context, podname string, driver strin
}

if len(nodes) == 0 {
return networks, fmt.Errorf("Pod %s has no nodes", podname)
return networks, types.NewDetailedErr(types.ErrPodNoNodes, podname)
}

node := nodes[0]
Expand Down
9 changes: 4 additions & 5 deletions cluster/calcium/realloc.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package calcium

import (
"context"
"fmt"
"sync"

enginecontainer "github.com/docker/docker/api/types/container"
Expand Down Expand Up @@ -57,10 +56,10 @@ func (c *Calcium) ReallocResource(ctx context.Context, IDs []string, cpu float64
newCPURequire := container.Quota + cpu
newMemRequire := container.Memory + mem
if newCPURequire < 0 {
return nil, fmt.Errorf("CPU can not below zero %v", newCPURequire)
return nil, types.NewDetailedErr(types.ErrBadCPU, newCPURequire)
}
if newMemRequire < minMemory {
return nil, fmt.Errorf("Memory can not below zero %v", newMemRequire)
return nil, types.NewDetailedErr(types.ErrBadMemory, newMemRequire)
}

// init data
Expand Down Expand Up @@ -115,7 +114,7 @@ func (c *Calcium) reallocNodesMemory(ctx context.Context, podname string, nodeCo
defer lock.Unlock(ctx)
for node, containers := range nodeContainers {
if cap := int(node.MemCap / memory); cap < len(containers) {
return fmt.Errorf("Not enough resource %s", node.Name)
return types.NewDetailedErr(types.ErrInsufficientRes, node.Name)
}
if err := c.store.UpdateNodeResource(ctx, podname, node.Name, types.CPUMap{}, int64(len(containers))*memory, "-"); err != nil {
return err
Expand Down Expand Up @@ -259,7 +258,7 @@ func (c *Calcium) reallocNodesCPUMem(
}
// 这里只有1个节点,肯定会出现1个节点的解决方案
if total < need || len(nodeCPUPlans) != 1 {
return nil, fmt.Errorf("Not enough resource")
return nil, types.ErrInsufficientRes
}

cpuCost := types.CPUMap{}
Expand Down
10 changes: 4 additions & 6 deletions cluster/calcium/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package calcium

import (
"context"
"fmt"
"sort"

log "github.com/sirupsen/logrus"
Expand Down Expand Up @@ -45,7 +44,7 @@ func (c *Calcium) allocResource(ctx context.Context, opts *types.DeployOptions,
case scheduler.CPU_PRIOR:
nodesInfo, nodeCPUPlans, total, err = c.scheduler.SelectCPUNodes(nodesInfo, opts.CPUQuota, opts.Memory)
default:
return nil, fmt.Errorf("Pod type not support yet")
return nil, types.ErrBadPodType
}
if err != nil {
return nil, err
Expand All @@ -59,7 +58,7 @@ func (c *Calcium) allocResource(ctx context.Context, opts *types.DeployOptions,
case cluster.DeployFill:
nodesInfo, err = c.scheduler.FillDivision(nodesInfo, opts.Count, total)
default:
return nil, fmt.Errorf("Deploy method not support yet")
return nil, types.ErrBadDeployMethod
}
if err != nil {
return nil, err
Expand All @@ -70,7 +69,7 @@ func (c *Calcium) allocResource(ctx context.Context, opts *types.DeployOptions,
p := sort.Search(len(nodesInfo), func(i int) bool { return nodesInfo[i].Deploy > 0 })
// p 最大也就是 len(nodesInfo) - 1
if p == len(nodesInfo) {
return nil, fmt.Errorf("Cannot alloc a plan, not enough resource")
return nil, types.ErrInsufficientRes
}
nodesInfo = nodesInfo[p:]
for i, nodeInfo := range nodesInfo {
Expand Down Expand Up @@ -117,8 +116,7 @@ func (c *Calcium) getCPUAndMem(ctx context.Context, podname, nodename string, la
}

if len(nodes) == 0 {
err := fmt.Errorf("No available nodes")
return result, err
return result, types.ErrInsufficientNodes
}

result = makeCPUAndMem(nodes)
Expand Down
3 changes: 2 additions & 1 deletion lock/etcdlock/lock.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (

"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/projecteru2/core/types"
"golang.org/x/net/context"
)

Expand All @@ -20,7 +21,7 @@ type Mutex struct {
//New new a lock
func New(cli *clientv3.Client, key string, ttl int) (*Mutex, error) {
if key == "" {
return nil, fmt.Errorf("No lock key")
return nil, types.ErrKeyIsEmpty
}

if !strings.HasPrefix(key, "/") {
Expand Down
15 changes: 9 additions & 6 deletions network/sdn/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,13 @@ type titanium struct{}
// connect to network with ipv4 address
func (t *titanium) ConnectToNetwork(ctx context.Context, containerID, networkID, ipv4 string) error {
if len(containerID) != 64 {
return fmt.Errorf("ContainerID must be in length of 64")
return types.ErrBadContainerID
}

engine, ok := utils.GetDockerEngineFromContext(ctx)
if !ok {
return fmt.Errorf("Not actually a `engineapi.Client` for value engine in context")
return types.NewDetailedErr(types.ErrCannotGetEngine,
fmt.Sprintf("Not actually a `engineapi.Client` for value engine in context"))
}

config := &enginenetwork.EndpointSettings{
Expand All @@ -35,7 +36,7 @@ func (t *titanium) ConnectToNetwork(ctx context.Context, containerID, networkID,
if ipv4 != "" {
ip := net.ParseIP(ipv4)
if ip == nil {
return fmt.Errorf("IP Address is not valid: %v", ipv4)
return types.NewDetailedErr(types.ErrBadIPAddress, ipv4)
}

config.IPAMConfig.IPv4Address = ip.String()
Expand All @@ -48,12 +49,13 @@ func (t *titanium) ConnectToNetwork(ctx context.Context, containerID, networkID,
// disconnect from network
func (t *titanium) DisconnectFromNetwork(ctx context.Context, containerID, networkID string) error {
if len(containerID) != 64 {
return fmt.Errorf("ContainerID must be in length of 64")
return types.ErrBadContainerID
}

engine, ok := utils.GetDockerEngineFromContext(ctx)
if !ok {
return fmt.Errorf("Not actually a `engineapi.Client` for value engine in context")
return types.NewDetailedErr(types.ErrCannotGetEngine,
fmt.Sprintf("Not actually a `engineapi.Client` for value engine in context"))
}

log.Debugf("[DisconnectFromNetwork] Disconnect %v from %v", containerID, networkID)
Expand All @@ -65,7 +67,8 @@ func (t *titanium) ListNetworks(ctx context.Context, driver string) ([]*types.Ne
networks := []*types.Network{}
engine, ok := utils.GetDockerEngineFromContext(ctx)
if !ok {
return networks, fmt.Errorf("Not actually a `engineapi.Client` for value engine in context")
return networks, types.NewDetailedErr(types.ErrCannotGetEngine,
fmt.Sprintf("Not actually a `engineapi.Client` for value engine in context"))
}

filters := enginefilters.NewArgs()
Expand Down
14 changes: 6 additions & 8 deletions rpc/rpc_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -290,12 +290,12 @@ func TestContainers(t *testing.T) {
// test GetContainer error
store.On("GetContainer", "012").Return(&types.Container{}, nil)
_, err = clnt.GetContainer(ctx, &pb.ContainerID{Id: "012"})
assert.Contains(t, err.Error(), "Container ID must be length of 64")
assert.Contains(t, err.Error(), types.ErrBadContainerID.Error())

ID := "586f906185de3ed755d0db1c0f37149c1eae1ba557a26adccbe1f51c500d07d1"
store.On("GetContainer", ID).Return(&types.Container{}, nil)
_, err = clnt.GetContainer(ctx, &pb.ContainerID{Id: ID})
assert.Contains(t, err.Error(), "Engine is nil")
assert.Contains(t, err.Error(), types.ErrNilEngine.Error())

// test GetContainers
container := types.Container{
Expand Down Expand Up @@ -462,8 +462,7 @@ func TestCreateContainer(t *testing.T) {
resp, err := clnt.CreateContainer(ctx, &depOpts)
assert.NoError(t, err)
recv, err := resp.Recv()
assert.Error(t, err)
assert.Contains(t, err.Error(), "Minimum memory limit allowed is 4MB")
assert.Contains(t, err.Error(), types.ErrBadMemory.Error())
assert.Nil(t, recv)

depOpts = pb.DeployOptions{
Expand All @@ -487,8 +486,7 @@ func TestCreateContainer(t *testing.T) {
resp, err = clnt.CreateContainer(ctx, &depOpts)
assert.NoError(t, err)
recv, err = resp.Recv()
assert.Error(t, err)
assert.Contains(t, err.Error(), "Count must be positive")
assert.Contains(t, err.Error(), types.ErrBadCount.Error())
assert.Nil(t, recv)

}
Expand Down Expand Up @@ -540,7 +538,7 @@ func TestRunAndWait(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, stream.Send(&runAndWaitOpts))
_, err = stream.Recv()
assert.Contains(t, err.Error(), "Minimum memory limit allowed is 4MB")
assert.Contains(t, err.Error(), types.ErrBadMemory.Error())

depOpts = pb.DeployOptions{
Image: "", // string
Expand Down Expand Up @@ -621,5 +619,5 @@ func TestOthers(t *testing.T) {
})
r, err := rmContainerResp.Recv() // 同理这个err也是gRPC调用的error,而不是执行动作的error
assert.Nil(t, err)
assert.Contains(t, r.GetMessage(), "Engine is nil")
assert.Contains(t, r.GetMessage(), types.ErrNilEngine.Error())
}
3 changes: 1 addition & 2 deletions scheduler/complex/average.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package complexscheduler

import (
"fmt"
"sort"

"github.com/projecteru2/core/types"
Expand All @@ -14,7 +13,7 @@ func AveragePlan(nodesInfo []types.NodeInfo, need int) ([]types.NodeInfo, error)
sort.Slice(nodesInfo, func(i, j int) bool { return nodesInfo[i].Capacity < nodesInfo[j].Capacity })
p := sort.Search(nodesInfoLength, func(i int) bool { return nodesInfo[i].Capacity >= need })
if p == nodesInfoLength {
return nil, fmt.Errorf("Cannot alloc a each node plan, not enough capacity")
return nil, types.ErrInsufficientCap
}
nodesInfo = nodesInfo[p:]
for i := range nodesInfo {
Expand Down
3 changes: 1 addition & 2 deletions scheduler/complex/cpu.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ CPU 分配的核心算法
package complexscheduler

import (
"fmt"
"sort"

"github.com/projecteru2/core/types"
Expand Down Expand Up @@ -196,7 +195,7 @@ func cpuPriorPlan(cpu float64, memory int64, nodesInfo []types.NodeInfo, maxShar
sort.Slice(nodesInfo, func(i, j int) bool { return nodesInfo[i].Capacity < nodesInfo[j].Capacity })
p := sort.Search(len(nodesInfo), func(i int) bool { return nodesInfo[i].Capacity > 0 })
if p == len(nodesInfo) {
return nil, nil, 0, fmt.Errorf("Not enough resource")
return nil, nil, 0, types.ErrInsufficientRes
}

log.Debugf("[cpuPriorPlan] nodesInfo: %v", nodesInfo)
Expand Down
3 changes: 2 additions & 1 deletion scheduler/complex/fill.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ func FillPlan(nodesInfo []types.NodeInfo, need int) ([]types.NodeInfo, error) {
for i := range nodesInfo {
diff := need - nodesInfo[i].Count
if nodesInfo[i].Capacity < diff {
return nil, fmt.Errorf("Cannot alloc a fill node plan, node %s has not enough resource", nodesInfo[i].Name)
return nil, types.NewDetailedErr(types.ErrInsufficientRes,
fmt.Sprintf("node %s cannot alloc a fill node plan", nodesInfo[i].Name))
}
nodesInfo[i].Deploy = diff
nodesInfo[i].Capacity -= diff
Expand Down
4 changes: 2 additions & 2 deletions scheduler/complex/fill_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"sort"
"testing"

"github.com/projecteru2/core/types"
"github.com/stretchr/testify/assert"
)

Expand Down Expand Up @@ -49,8 +50,7 @@ func TestFillPlan(t *testing.T) {
n = 15
nodes = deployedNodes()
_, err = FillPlan(nodes, n)
assert.Error(t, err)
assert.Contains(t, err.Error(), "node n2 has not enough resource")
assert.EqualError(t, types.IsDetailedErr(err), types.ErrInsufficientRes.Error())

// 全局补充不能
n = 1
Expand Down
Loading

0 comments on commit db000bc

Please sign in to comment.