diff --git a/cluster/calcium/cluster.go b/cluster/calcium/cluster.go index cd0b102a5..ef92f512c 100644 --- a/cluster/calcium/cluster.go +++ b/cluster/calcium/cluster.go @@ -1,7 +1,6 @@ package calcium import ( - "fmt" "strings" "github.com/projecteru2/core/cluster" @@ -52,7 +51,7 @@ func New(config types.Config) (*Calcium, error) { case cluster.GITHUB: scm = github.New(config) default: - return nil, fmt.Errorf("Unkonwn SCM type: %s", config.Git.SCMType) + return nil, types.NewDetailedErr(types.ErrBadSCMType, scmtype) } return &Calcium{store: store, config: config, scheduler: scheduler, network: titanium, source: scm}, nil diff --git a/cluster/calcium/common.go b/cluster/calcium/common.go index 06ec341b7..963b32bf4 100644 --- a/cluster/calcium/common.go +++ b/cluster/calcium/common.go @@ -8,7 +8,7 @@ import ( const ( restartAlways = "always" - minMemory = 4194304 + minMemory = types.MByte * 4 root = "root" maxPuller = 10 ) diff --git a/cluster/calcium/create_container.go b/cluster/calcium/create_container.go index 41086a89d..85defd45b 100644 --- a/cluster/calcium/create_container.go +++ b/cluster/calcium/create_container.go @@ -31,15 +31,16 @@ func (c *Calcium) CreateContainer(ctx context.Context, opts *types.DeployOptions // 4194304 Byte = 4 MB, docker 创建容器的内存最低标准 if opts.Memory < minMemory { - return nil, fmt.Errorf("Minimum memory limit allowed is 4MB, got %d", opts.Memory) + return nil, types.NewDetailedErr(types.ErrBadMemory, + fmt.Sprintf("Minimum memory limit allowed is 4MB, got %d", opts.Memory)) } // Count 要大于0 if opts.Count <= 0 { - return nil, fmt.Errorf("Count must be positive, got %d", opts.Count) + return nil, types.NewDetailedErr(types.ErrBadCount, opts.Count) } // CPUQuota 也需要大于 0 if opts.CPUQuota <= 0 { - return nil, fmt.Errorf("CPUQuota must be positive, got %d", opts.Count) + return nil, types.NewDetailedErr(types.ErrBadCPU, opts.CPUQuota) } if pod.Favor == scheduler.MEMORY_PRIOR { @@ -47,7 +48,7 @@ func (c *Calcium) CreateContainer(ctx context.Context, opts *types.DeployOptions } else if pod.Favor == scheduler.CPU_PRIOR { return c.createContainerWithCPUPrior(ctx, opts) } - return nil, fmt.Errorf("[CreateContainer] Favor not support: %v", pod.Favor) + return nil, types.NewDetailedErr(types.ErrBadFaver, pod.Favor) } func (c *Calcium) createContainerWithMemoryPrior(ctx context.Context, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error) { @@ -121,7 +122,7 @@ func (c *Calcium) createContainerWithCPUPrior(ctx context.Context, opts *types.D } if len(nodesInfo) == 0 { - return ch, fmt.Errorf("Not enough resource to create container") + return ch, types.ErrInsufficientRes } go func() { @@ -304,7 +305,7 @@ func (c *Calcium) makeContainerOptions(index int, quota types.CPUMap, opts *type } else if favor == scheduler.MEMORY_PRIOR { resource = makeMemoryPriorSetting(opts.Memory, opts.CPUQuota, opts.SoftLimit) } else { - return nil, nil, nil, "", fmt.Errorf("favor not support %s", favor) + return nil, nil, nil, "", types.NewDetailedErr(types.ErrBadFaver, favor) } resource.Ulimits = ulimits diff --git a/cluster/calcium/create_container_test.go b/cluster/calcium/create_container_test.go index 1a4e76a24..287742654 100644 --- a/cluster/calcium/create_container_test.go +++ b/cluster/calcium/create_container_test.go @@ -68,8 +68,7 @@ func TestClean(t *testing.T) { // delete pod, which will fail because there are remaining nodes err := mockc.store.RemovePod(context.Background(), podname) - assert.Error(t, err) - assert.Contains(t, err.Error(), "still has nodes") + 
assert.EqualError(t, types.IsDetailedErr(err), types.ErrPodHasNodes.Error()) } func TestCreateContainerWithCPUPrior(t *testing.T) { diff --git a/cluster/calcium/network.go b/cluster/calcium/network.go index 78263c315..aec82668d 100644 --- a/cluster/calcium/network.go +++ b/cluster/calcium/network.go @@ -2,7 +2,6 @@ package calcium import ( "context" - "fmt" "github.com/projecteru2/core/types" "github.com/projecteru2/core/utils" @@ -20,7 +19,7 @@ func (c *Calcium) ListNetworks(ctx context.Context, podname string, driver strin } if len(nodes) == 0 { - return networks, fmt.Errorf("Pod %s has no nodes", podname) + return networks, types.NewDetailedErr(types.ErrPodNoNodes, podname) } node := nodes[0] diff --git a/cluster/calcium/realloc.go b/cluster/calcium/realloc.go index dde9fd470..81d57ce36 100644 --- a/cluster/calcium/realloc.go +++ b/cluster/calcium/realloc.go @@ -2,7 +2,6 @@ package calcium import ( "context" - "fmt" "sync" enginecontainer "github.com/docker/docker/api/types/container" @@ -57,10 +56,10 @@ func (c *Calcium) ReallocResource(ctx context.Context, IDs []string, cpu float64 newCPURequire := container.Quota + cpu newMemRequire := container.Memory + mem if newCPURequire < 0 { - return nil, fmt.Errorf("CPU can not below zero %v", newCPURequire) + return nil, types.NewDetailedErr(types.ErrBadCPU, newCPURequire) } if newMemRequire < minMemory { - return nil, fmt.Errorf("Memory can not below zero %v", newMemRequire) + return nil, types.NewDetailedErr(types.ErrBadMemory, newMemRequire) } // init data @@ -115,7 +114,7 @@ func (c *Calcium) reallocNodesMemory(ctx context.Context, podname string, nodeCo defer lock.Unlock(ctx) for node, containers := range nodeContainers { if cap := int(node.MemCap / memory); cap < len(containers) { - return fmt.Errorf("Not enough resource %s", node.Name) + return types.NewDetailedErr(types.ErrInsufficientRes, node.Name) } if err := c.store.UpdateNodeResource(ctx, podname, node.Name, types.CPUMap{}, int64(len(containers))*memory, "-"); err != nil { return err @@ -259,7 +258,7 @@ func (c *Calcium) reallocNodesCPUMem( } // 这里只有1个节点,肯定会出现1个节点的解决方案 if total < need || len(nodeCPUPlans) != 1 { - return nil, fmt.Errorf("Not enough resource") + return nil, types.ErrInsufficientRes } cpuCost := types.CPUMap{} diff --git a/cluster/calcium/resource.go b/cluster/calcium/resource.go index 6027d04da..bb67661e8 100644 --- a/cluster/calcium/resource.go +++ b/cluster/calcium/resource.go @@ -2,7 +2,6 @@ package calcium import ( "context" - "fmt" "sort" log "github.com/sirupsen/logrus" @@ -45,7 +44,7 @@ func (c *Calcium) allocResource(ctx context.Context, opts *types.DeployOptions, case scheduler.CPU_PRIOR: nodesInfo, nodeCPUPlans, total, err = c.scheduler.SelectCPUNodes(nodesInfo, opts.CPUQuota, opts.Memory) default: - return nil, fmt.Errorf("Pod type not support yet") + return nil, types.ErrBadPodType } if err != nil { return nil, err @@ -59,7 +58,7 @@ func (c *Calcium) allocResource(ctx context.Context, opts *types.DeployOptions, case cluster.DeployFill: nodesInfo, err = c.scheduler.FillDivision(nodesInfo, opts.Count, total) default: - return nil, fmt.Errorf("Deploy method not support yet") + return nil, types.ErrBadDeployMethod } if err != nil { return nil, err @@ -70,7 +69,7 @@ func (c *Calcium) allocResource(ctx context.Context, opts *types.DeployOptions, p := sort.Search(len(nodesInfo), func(i int) bool { return nodesInfo[i].Deploy > 0 }) // p 最大也就是 len(nodesInfo) - 1 if p == len(nodesInfo) { - return nil, fmt.Errorf("Cannot alloc a 
plan, not enough resource") + return nil, types.ErrInsufficientRes } nodesInfo = nodesInfo[p:] for i, nodeInfo := range nodesInfo { @@ -117,8 +116,7 @@ func (c *Calcium) getCPUAndMem(ctx context.Context, podname, nodename string, la } if len(nodes) == 0 { - err := fmt.Errorf("No available nodes") - return result, err + return result, types.ErrInsufficientNodes } result = makeCPUAndMem(nodes) diff --git a/lock/etcdlock/lock.go b/lock/etcdlock/lock.go index a25fcf605..a712eff31 100644 --- a/lock/etcdlock/lock.go +++ b/lock/etcdlock/lock.go @@ -7,6 +7,7 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/concurrency" + "github.com/projecteru2/core/types" "golang.org/x/net/context" ) @@ -20,7 +21,7 @@ type Mutex struct { //New new a lock func New(cli *clientv3.Client, key string, ttl int) (*Mutex, error) { if key == "" { - return nil, fmt.Errorf("No lock key") + return nil, types.ErrKeyIsEmpty } if !strings.HasPrefix(key, "/") { diff --git a/network/sdn/plugin.go b/network/sdn/plugin.go index cea1a5e74..dc1b2f1b5 100644 --- a/network/sdn/plugin.go +++ b/network/sdn/plugin.go @@ -18,12 +18,13 @@ type titanium struct{} // connect to network with ipv4 address func (t *titanium) ConnectToNetwork(ctx context.Context, containerID, networkID, ipv4 string) error { if len(containerID) != 64 { - return fmt.Errorf("ContainerID must be in length of 64") + return types.ErrBadContainerID } engine, ok := utils.GetDockerEngineFromContext(ctx) if !ok { - return fmt.Errorf("Not actually a `engineapi.Client` for value engine in context") + return types.NewDetailedErr(types.ErrCannotGetEngine, + fmt.Sprintf("Not actually a `engineapi.Client` for value engine in context")) } config := &enginenetwork.EndpointSettings{ @@ -35,7 +36,7 @@ func (t *titanium) ConnectToNetwork(ctx context.Context, containerID, networkID, if ipv4 != "" { ip := net.ParseIP(ipv4) if ip == nil { - return fmt.Errorf("IP Address is not valid: %v", ipv4) + return types.NewDetailedErr(types.ErrBadIPAddress, ipv4) } config.IPAMConfig.IPv4Address = ip.String() @@ -48,12 +49,13 @@ func (t *titanium) ConnectToNetwork(ctx context.Context, containerID, networkID, // disconnect from network func (t *titanium) DisconnectFromNetwork(ctx context.Context, containerID, networkID string) error { if len(containerID) != 64 { - return fmt.Errorf("ContainerID must be in length of 64") + return types.ErrBadContainerID } engine, ok := utils.GetDockerEngineFromContext(ctx) if !ok { - return fmt.Errorf("Not actually a `engineapi.Client` for value engine in context") + return types.NewDetailedErr(types.ErrCannotGetEngine, + fmt.Sprintf("Not actually a `engineapi.Client` for value engine in context")) } log.Debugf("[DisconnectFromNetwork] Disconnect %v from %v", containerID, networkID) @@ -65,7 +67,8 @@ func (t *titanium) ListNetworks(ctx context.Context, driver string) ([]*types.Ne networks := []*types.Network{} engine, ok := utils.GetDockerEngineFromContext(ctx) if !ok { - return networks, fmt.Errorf("Not actually a `engineapi.Client` for value engine in context") + return networks, types.NewDetailedErr(types.ErrCannotGetEngine, + fmt.Sprintf("Not actually a `engineapi.Client` for value engine in context")) } filters := enginefilters.NewArgs() diff --git a/rpc/rpc_test.go b/rpc/rpc_test.go index 3b72e32c0..9402b4605 100644 --- a/rpc/rpc_test.go +++ b/rpc/rpc_test.go @@ -290,12 +290,12 @@ func TestContainers(t *testing.T) { // test GetContainer error store.On("GetContainer", "012").Return(&types.Container{}, nil) _, 
err = clnt.GetContainer(ctx, &pb.ContainerID{Id: "012"}) - assert.Contains(t, err.Error(), "Container ID must be length of 64") + assert.Contains(t, err.Error(), types.ErrBadContainerID.Error()) ID := "586f906185de3ed755d0db1c0f37149c1eae1ba557a26adccbe1f51c500d07d1" store.On("GetContainer", ID).Return(&types.Container{}, nil) _, err = clnt.GetContainer(ctx, &pb.ContainerID{Id: ID}) - assert.Contains(t, err.Error(), "Engine is nil") + assert.Contains(t, err.Error(), types.ErrNilEngine.Error()) // test GetContainers container := types.Container{ @@ -462,8 +462,7 @@ func TestCreateContainer(t *testing.T) { resp, err := clnt.CreateContainer(ctx, &depOpts) assert.NoError(t, err) recv, err := resp.Recv() - assert.Error(t, err) - assert.Contains(t, err.Error(), "Minimum memory limit allowed is 4MB") + assert.Contains(t, err.Error(), types.ErrBadMemory.Error()) assert.Nil(t, recv) depOpts = pb.DeployOptions{ @@ -487,8 +486,7 @@ func TestCreateContainer(t *testing.T) { resp, err = clnt.CreateContainer(ctx, &depOpts) assert.NoError(t, err) recv, err = resp.Recv() - assert.Error(t, err) - assert.Contains(t, err.Error(), "Count must be positive") + assert.Contains(t, err.Error(), types.ErrBadCount.Error()) assert.Nil(t, recv) } @@ -540,7 +538,7 @@ func TestRunAndWait(t *testing.T) { assert.NoError(t, err) assert.NoError(t, stream.Send(&runAndWaitOpts)) _, err = stream.Recv() - assert.Contains(t, err.Error(), "Minimum memory limit allowed is 4MB") + assert.Contains(t, err.Error(), types.ErrBadMemory.Error()) depOpts = pb.DeployOptions{ Image: "", // string @@ -621,5 +619,5 @@ func TestOthers(t *testing.T) { }) r, err := rmContainerResp.Recv() // 同理这个err也是gRPC调用的error,而不是执行动作的error assert.Nil(t, err) - assert.Contains(t, r.GetMessage(), "Engine is nil") + assert.Contains(t, r.GetMessage(), types.ErrNilEngine.Error()) } diff --git a/scheduler/complex/average.go b/scheduler/complex/average.go index 465249bcb..7c0e6fdbc 100644 --- a/scheduler/complex/average.go +++ b/scheduler/complex/average.go @@ -1,7 +1,6 @@ package complexscheduler import ( - "fmt" "sort" "github.com/projecteru2/core/types" @@ -14,7 +13,7 @@ func AveragePlan(nodesInfo []types.NodeInfo, need int) ([]types.NodeInfo, error) sort.Slice(nodesInfo, func(i, j int) bool { return nodesInfo[i].Capacity < nodesInfo[j].Capacity }) p := sort.Search(nodesInfoLength, func(i int) bool { return nodesInfo[i].Capacity >= need }) if p == nodesInfoLength { - return nil, fmt.Errorf("Cannot alloc a each node plan, not enough capacity") + return nil, types.ErrInsufficientCap } nodesInfo = nodesInfo[p:] for i := range nodesInfo { diff --git a/scheduler/complex/cpu.go b/scheduler/complex/cpu.go index 17680aba1..e921cc989 100644 --- a/scheduler/complex/cpu.go +++ b/scheduler/complex/cpu.go @@ -5,7 +5,6 @@ CPU 分配的核心算法 package complexscheduler import ( - "fmt" "sort" "github.com/projecteru2/core/types" @@ -196,7 +195,7 @@ func cpuPriorPlan(cpu float64, memory int64, nodesInfo []types.NodeInfo, maxShar sort.Slice(nodesInfo, func(i, j int) bool { return nodesInfo[i].Capacity < nodesInfo[j].Capacity }) p := sort.Search(len(nodesInfo), func(i int) bool { return nodesInfo[i].Capacity > 0 }) if p == len(nodesInfo) { - return nil, nil, 0, fmt.Errorf("Not enough resource") + return nil, nil, 0, types.ErrInsufficientRes } log.Debugf("[cpuPriorPlan] nodesInfo: %v", nodesInfo) diff --git a/scheduler/complex/fill.go b/scheduler/complex/fill.go index 5c2116378..d9127ad03 100644 --- a/scheduler/complex/fill.go +++ b/scheduler/complex/fill.go @@ -20,7 +20,8 @@ 
func FillPlan(nodesInfo []types.NodeInfo, need int) ([]types.NodeInfo, error) { for i := range nodesInfo { diff := need - nodesInfo[i].Count if nodesInfo[i].Capacity < diff { - return nil, fmt.Errorf("Cannot alloc a fill node plan, node %s has not enough resource", nodesInfo[i].Name) + return nil, types.NewDetailedErr(types.ErrInsufficientRes, + fmt.Sprintf("node %s cannot alloc a fill node plan", nodesInfo[i].Name)) } nodesInfo[i].Deploy = diff nodesInfo[i].Capacity -= diff diff --git a/scheduler/complex/fill_test.go b/scheduler/complex/fill_test.go index af8017183..9a5eafbb4 100644 --- a/scheduler/complex/fill_test.go +++ b/scheduler/complex/fill_test.go @@ -4,6 +4,7 @@ import ( "sort" "testing" + "github.com/projecteru2/core/types" "github.com/stretchr/testify/assert" ) @@ -49,8 +50,7 @@ func TestFillPlan(t *testing.T) { n = 15 nodes = deployedNodes() _, err = FillPlan(nodes, n) - assert.Error(t, err) - assert.Contains(t, err.Error(), "node n2 has not enough resource") + assert.EqualError(t, types.IsDetailedErr(err), types.ErrInsufficientRes.Error()) // 全局补充不能 n = 1 diff --git a/scheduler/complex/potassium.go b/scheduler/complex/potassium.go index 86d5021eb..52c47c898 100644 --- a/scheduler/complex/potassium.go +++ b/scheduler/complex/potassium.go @@ -28,7 +28,7 @@ func (m *Potassium) MaxCPUIdleNode(nodes []*types.Node) *types.Node { func (m *Potassium) SelectMemoryNodes(nodesInfo []types.NodeInfo, rate, memory int64) ([]types.NodeInfo, int, error) { log.Debugf("[SelectMemoryNodes] nodesInfo: %v, rate: %d, memory: %d", nodesInfo, rate, memory) if memory <= 0 { - return nil, 0, fmt.Errorf("memory must positive") + return nil, 0, types.ErrNegativeMemory } nodesInfoLength := len(nodesInfo) @@ -38,7 +38,7 @@ func (m *Potassium) SelectMemoryNodes(nodesInfo []types.NodeInfo, rate, memory i p := sort.Search(nodesInfoLength, func(i int) bool { return nodesInfo[i].CPURate >= rate }) // p 最大也就是 nodesInfoLength - 1 if p == nodesInfoLength { - return nil, 0, fmt.Errorf("Cannot alloc a plan, not enough cpu rate") + return nil, 0, types.ErrInsufficientCPU } nodesInfoLength -= p nodesInfo = nodesInfo[p:] @@ -48,7 +48,7 @@ func (m *Potassium) SelectMemoryNodes(nodesInfo []types.NodeInfo, rate, memory i sort.Slice(nodesInfo, func(i, j int) bool { return nodesInfo[i].MemCap < nodesInfo[j].MemCap }) p = sort.Search(nodesInfoLength, func(i int) bool { return nodesInfo[i].MemCap >= memory }) if p == nodesInfoLength { - return nil, 0, fmt.Errorf("Cannot alloc a plan, not enough memory") + return nil, 0, types.ErrInsufficientMEM } nodesInfoLength -= p nodesInfo = nodesInfo[p:] @@ -69,10 +69,10 @@ func (m *Potassium) SelectMemoryNodes(nodesInfo []types.NodeInfo, rate, memory i func (m *Potassium) SelectCPUNodes(nodesInfo []types.NodeInfo, quota float64, memory int64) ([]types.NodeInfo, map[string][]types.CPUMap, int, error) { log.Debugf("[SelectCPUNodes] nodesInfo: %v, cpu: %v", nodesInfo, quota) if quota <= 0 { - return nil, nil, 0, fmt.Errorf("quota must positive") + return nil, nil, 0, types.ErrNegativeQuota } if len(nodesInfo) == 0 { - return nil, nil, 0, fmt.Errorf("No nodes provide to choose some") + return nil, nil, 0, types.ErrZeroNodes } return cpuPriorPlan(quota, memory, nodesInfo, m.maxshare, m.sharebase) } @@ -81,7 +81,8 @@ func (m *Potassium) SelectCPUNodes(nodesInfo []types.NodeInfo, quota float64, me // 部署完 N 个后全局尽可能平均 func (m *Potassium) CommonDivision(nodesInfo []types.NodeInfo, need, total int) ([]types.NodeInfo, error) { if total < need { - return nil, fmt.Errorf("Not enough 
resource need: %d, vol: %d", need, total) + return nil, types.NewDetailedErr(types.ErrInsufficientRes, + fmt.Sprintf("need: %d, vol: %d", need, total)) } return CommunismDivisionPlan(nodesInfo, need) } @@ -90,7 +91,8 @@ func (m *Potassium) CommonDivision(nodesInfo []types.NodeInfo, need, total int) // 容量够的机器每一台部署 N 个 func (m *Potassium) EachDivision(nodesInfo []types.NodeInfo, need, total int) ([]types.NodeInfo, error) { if total < need { - return nil, fmt.Errorf("Not enough resource need: %d, vol: %d", need, total) + return nil, types.NewDetailedErr(types.ErrInsufficientRes, + fmt.Sprintf("need: %d, vol: %d", need, total)) } return AveragePlan(nodesInfo, need) } @@ -99,7 +101,8 @@ func (m *Potassium) EachDivision(nodesInfo []types.NodeInfo, need, total int) ([ // 根据之前部署的策略每一台补充到 N 个,超过 N 个忽略 func (m *Potassium) FillDivision(nodesInfo []types.NodeInfo, need, total int) ([]types.NodeInfo, error) { if total < need { - return nil, fmt.Errorf("Not enough resource need: %d, vol: %d", need, total) + return nil, types.NewDetailedErr(types.ErrInsufficientRes, + fmt.Sprintf("need: %d, vol: %d", need, total)) } return FillPlan(nodesInfo, need) } diff --git a/scheduler/complex/potassium_test.go b/scheduler/complex/potassium_test.go index b14514fdc..ae68014da 100644 --- a/scheduler/complex/potassium_test.go +++ b/scheduler/complex/potassium_test.go @@ -103,7 +103,7 @@ func getComplexNodes() []types.NodeInfo { CpuMap: types.CPUMap{ // 2 containers "0": 10, "1": 10, "2": 10, "3": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n1", }, @@ -115,7 +115,7 @@ func getComplexNodes() []types.NodeInfo { "8": 10, "9": 10, "10": 10, "11": 10, "12": 10, "13": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n2", }, @@ -126,7 +126,7 @@ func getComplexNodes() []types.NodeInfo { "4": 10, "5": 10, "6": 10, "7": 10, "8": 10, "9": 10, "10": 10, "11": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n3", }, @@ -139,7 +139,7 @@ func getComplexNodes() []types.NodeInfo { "12": 10, "13": 10, "14": 10, "15": 10, "16": 10, "17": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n4", }, @@ -149,7 +149,7 @@ func getComplexNodes() []types.NodeInfo { "0": 10, "1": 10, "2": 10, "3": 10, "4": 10, "5": 10, "6": 10, "7": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n5", }, @@ -164,7 +164,7 @@ func getEvenPlanNodes() []types.NodeInfo { "0": 10, "1": 10, "2": 10, "3": 10, "4": 10, "5": 10, "6": 10, "7": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n1", }, @@ -175,7 +175,7 @@ func getEvenPlanNodes() []types.NodeInfo { "4": 10, "5": 10, "6": 10, "7": 10, "8": 10, "9": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n2", }, @@ -186,7 +186,7 @@ func getEvenPlanNodes() []types.NodeInfo { "4": 10, "5": 10, "6": 10, "7": 10, "8": 10, "9": 10, "10": 10, "11": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n3", }, @@ -197,7 +197,7 @@ func getEvenPlanNodes() []types.NodeInfo { "4": 10, "5": 10, "6": 10, "7": 10, "8": 10, "9": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n4", }, @@ -254,27 +254,24 @@ func SelectMemoryNodes(k *Potassium, nodesInfo []types.NodeInfo, rate, memory in func TestSelectCPUNodes(t *testing.T) { k, _ := newPotassium() - mem := int64(4 * 1024 * 1024 * 1024) + mem := 4 * types.GByte _, _, err := SelectCPUNodes(k, []types.NodeInfo{}, 1, 1, 1, false) - assert.Error(t, err) - assert.Equal(t, err.Error(), "No nodes provide to choose some") + assert.Equal(t, 
types.IsDetailedErr(err), types.ErrZeroNodes) nodes := generateNodes(2, 2, mem, 10) _, _, err = SelectCPUNodes(k, nodes, 0.5, 1, 1, false) assert.NoError(t, err) _, _, err = SelectCPUNodes(k, nodes, 2, 1, 3, false) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Not enough") + assert.Equal(t, types.IsDetailedErr(err), types.ErrInsufficientRes) + assert.Contains(t, err.Error(), "need: 3, vol: 1") _, _, err = SelectCPUNodes(k, nodes, 3, 1, 2, false) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Not enough") + assert.Equal(t, types.IsDetailedErr(err), types.ErrInsufficientRes) _, _, err = SelectCPUNodes(k, nodes, 1, 1, 5, false) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Not enough") + assert.Equal(t, types.IsDetailedErr(err), types.ErrInsufficientRes) // new round test nodes = generateNodes(2, 2, mem, 10) @@ -320,13 +317,12 @@ func TestSelectCPUNodesWithMemoryLimit(t *testing.T) { // 测试 2 个 Node,内存不足 nodes = generateNodes(2, 2, 1024, 10) _, _, err = SelectCPUNodes(k, nodes, 0.1, 1025, 1, true) - assert.Contains(t, err.Error(), "enough") + assert.EqualError(t, err, types.ErrInsufficientRes.Error()) // 测试 need 超过 each node 的 capacity nodes = generateNodes(2, 2, 1024, 10) _, _, err = SelectCPUNodes(k, nodes, 0.1, 1024, 2, true) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not enough capacity") + assert.EqualError(t, err, types.ErrInsufficientCap.Error()) } func TestRecurrence(t *testing.T) { @@ -338,28 +334,28 @@ func TestRecurrence(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 0, "10": 0, "7": 0, "8": 10, "9": 10, "13": 0, "14": 0, "15": 10, "2": 10, "5": 10, "11": 0, "12": 0, "4": 0, "1": 0, "3": 10, "6": 0}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "c2-docker-26", }, types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"6": 10, "10": 0, "13": 0, "14": 10, "2": 0, "7": 0, "1": 0, "11": 0, "15": 0, "8": 10, "0": 0, "3": 0, "4": 0, "5": 0, "9": 10, "12": 0}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "c2-docker-27", }, types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"13": 0, "14": 0, "15": 0, "4": 10, "9": 0, "1": 0, "10": 0, "12": 10, "5": 10, "6": 10, "8": 10, "0": 0, "11": 0, "2": 10, "3": 0, "7": 0}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "c2-docker-28", }, types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"15": 0, "3": 10, "0": 0, "10": 0, "13": 0, "7": 10, "8": 0, "9": 10, "12": 10, "2": 10, "4": 10, "1": 0, "11": 0, "14": 10, "5": 10, "6": 10}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "c2-docker-29", }, @@ -426,7 +422,7 @@ func TestComplexNodes(t *testing.T) { //test5 nodes = getComplexNodes() _, _, err = SelectCPUNodes(k, nodes, 0, 1, 10, false) - assert.Equal(t, err.Error(), "quota must positive") + assert.EqualError(t, err, types.ErrNegativeQuota.Error()) //test6 nodes = getComplexNodes() @@ -449,14 +445,14 @@ func TestCPUWithMaxShareLimit(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 100, "1": 100, "2": 100, "3": 100, "4": 100, "5": 100}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, } _, _, err = SelectCPUNodes(k, nodes, 1.7, 1, 3, false) - assert.Error(t, err) + assert.Equal(t, types.IsDetailedErr(err), types.ErrInsufficientRes) assert.Contains(t, err.Error(), "vol: 2") } @@ -473,7 +469,7 @@ func TestCpuOverSell(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 300, "1": 300}, - MemCap: 
12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -491,7 +487,7 @@ func TestCpuOverSell(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 300}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -505,7 +501,7 @@ func TestCpuOverSell(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 300}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -519,7 +515,7 @@ func TestCpuOverSell(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 100, "1": 200, "2": 300}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -534,7 +530,7 @@ func TestCpuOverSell(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 50, "1": 100, "2": 300, "3": 70, "4": 200, "5": 30, "6": 230}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -546,7 +542,7 @@ func TestCpuOverSell(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 70, "1": 100, "2": 400}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -571,7 +567,7 @@ func TestCPUOverSellAndStableFragmentCore(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 300, "1": 300}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -585,7 +581,7 @@ func TestCPUOverSellAndStableFragmentCore(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 230, "1": 200}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -607,7 +603,7 @@ func TestCPUOverSellAndStableFragmentCore(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 230, "1": 80, "2": 300, "3": 200}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -622,7 +618,7 @@ func TestCPUOverSellAndStableFragmentCore(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 70, "1": 50, "2": 100, "3": 100, "4": 100}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -637,7 +633,7 @@ func TestCPUOverSellAndStableFragmentCore(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"0": 70, "1": 50, "2": 90}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "nodes1", }, @@ -662,7 +658,7 @@ func TestEvenPlan(t *testing.T) { CpuMap: types.CPUMap{ "0": 10, "1": 10, "2": 10, "3": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "node1", }, @@ -671,7 +667,7 @@ func TestEvenPlan(t *testing.T) { CpuMap: types.CPUMap{ "0": 10, "1": 10, "2": 10, "3": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "node2", }, @@ -716,7 +712,7 @@ func TestSpecialCase(t *testing.T) { CpuMap: types.CPUMap{ // 4 containers "0": 10, "1": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n1", }, @@ -726,7 +722,7 @@ func TestSpecialCase(t *testing.T) { "0": 10, "1": 10, "2": 10, "3": 10, "4": 10, "5": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n2", }, @@ -736,7 +732,7 @@ func TestSpecialCase(t *testing.T) { "0": 10, "1": 10, "2": 10, "3": 10, "4": 10, "5": 10, "6": 10, "7": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n3", }, @@ -754,7 +750,7 @@ func TestSpecialCase(t *testing.T) { "0": 10, "1": 10, "2": 10, "3": 10, "4": 10, "5": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n1", }, @@ -764,7 +760,7 @@ func TestSpecialCase(t 
*testing.T) { "0": 10, "1": 10, "2": 10, "3": 10, "4": 10, "5": 10, "6": 10, "7": 10, }, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "n2", }, @@ -781,7 +777,7 @@ func TestGetPodVol(t *testing.T) { types.NodeInfo{ CPUAndMem: types.CPUAndMem{ CpuMap: types.CPUMap{"15": 0, "3": 10, "0": 0, "10": 0, "13": 0, "7": 10, "8": 0, "9": 10, "12": 10, "2": 10, "4": 10, "1": 0, "11": 0, "14": 10, "5": 10, "6": 10}, - MemCap: 12400000, + MemCap: 12 * types.GByte, }, Name: "c2-docker-26", }, @@ -803,7 +799,7 @@ func Benchmark_CPUAlloc(b *testing.B) { var count = 10000 for i := 0; i < b.N; i++ { // 24 core, 128G memory, 10 pieces per core - hugePod := generateNodes(count, 24, 137438953472, 10) + hugePod := generateNodes(count, 24, 128*types.GByte, 10) need := getNodesCapacity(hugePod, cpu, 10, -1) b.StartTimer() r, c, err := SelectCPUNodes(k, hugePod, cpu, 1, need, false) @@ -825,7 +821,7 @@ func Benchmark_MemAlloc(b *testing.B) { var rate int64 = 1024 for i := 0; i < b.N; i++ { // 24 core, 128G memory, 10 pieces per core - hugePod := generateNodes(count, 24, 137438953472, 10) + hugePod := generateNodes(count, 24, 128*types.GByte, 10) b.StartTimer() r, err := SelectMemoryNodes(k, hugePod, rate, memory, need, false) b.StopTimer() @@ -837,10 +833,10 @@ func Benchmark_MemAlloc(b *testing.B) { // Test SelectMemoryNodes func TestSelectMemoryNodes(t *testing.T) { // 2 nodes [2 containers per node] - mem := int64(4 * 1024 * 1024 * 1024) + mem := 4 * types.GByte pod := generateNodes(2, 2, mem, 10) k, _ := newPotassium() - res, err := SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 4, false) + res, err := SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 4, false) assert.NoError(t, err) for _, node := range res { assert.Equal(t, node.Deploy, 2) @@ -848,13 +844,13 @@ func TestSelectMemoryNodes(t *testing.T) { // 4 nodes [1 container per node] pod = generateNodes(4, 2, mem, 10) - res, err = SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 1, false) + res, err = SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 1, false) assert.NoError(t, err) assert.Equal(t, res[0].Deploy, 1) // 4 nodes [1 container per node] pod = generateNodes(4, 2, mem, 10) - res, err = SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 4, false) + res, err = SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 4, false) assert.NoError(t, err) for _, node := range res { assert.Equal(t, node.Deploy, 1) @@ -865,7 +861,7 @@ func TestSelectMemoryNodes(t *testing.T) { for i := 0; i < 4; i++ { pod[i].Count += i } - res, err = SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 6, false) + res, err = SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 6, false) assert.NoError(t, err) for i, node := range res { assert.Equal(t, node.Deploy, 3-i) @@ -873,7 +869,7 @@ func TestSelectMemoryNodes(t *testing.T) { pod = generateNodes(1, 2, mem, 10) _, err = SelectMemoryNodes(k, pod, 10000, 0, 10, false) - assert.Equal(t, err.Error(), "memory must positive") + assert.EqualError(t, err, types.ErrNegativeMemory.Error()) // test each pod = generateNodes(4, 2, mem, 10) @@ -885,37 +881,29 @@ func TestSelectMemoryNodes(t *testing.T) { } func TestSelectMemoryNodesNotEnough(t *testing.T) { - mem := int64(4 * 1024 * 1024) + mem := 4 * types.MByte // 2 nodes [mem not enough] - pod := generateNodes(2, 2, mem*1024, 10) + pod := generateNodes(2, 2, 4*types.GByte, 10) k, _ := newPotassium() - _, err := SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 40, false) - assert.Error(t, err) - if err != nil { - assert.Equal(t, err.Error(), "Not enough resource need: 40, vol: 16") - } + 
_, err := SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 40, false) + assert.Equal(t, types.IsDetailedErr(err), types.ErrInsufficientRes) + assert.Contains(t, err.Error(), "need: 40, vol: 16") // 2 nodes [mem not enough] pod = generateNodes(2, 2, mem, 10) - _, err = SelectMemoryNodes(k, pod, 1e9, 5*1024*1024*1024, 1, false) - assert.Error(t, err) - if err != nil { - assert.Equal(t, err.Error(), "Cannot alloc a plan, not enough memory") - } + _, err = SelectMemoryNodes(k, pod, 1e9, 5*types.GByte, 1, false) + assert.Equal(t, err, types.ErrInsufficientMEM) // 2 nodes [cpu not enough] pod = generateNodes(2, 2, mem, 10) - _, err = SelectMemoryNodes(k, pod, 1e10, 512*1024*1024, 1, false) - assert.Error(t, err) - if err != nil { - assert.Equal(t, err.Error(), "Cannot alloc a plan, not enough cpu rate") - } + _, err = SelectMemoryNodes(k, pod, 1e10, 512*types.MByte, 1, false) + assert.Equal(t, err, types.ErrInsufficientCPU) } func TestSelectMemoryNodesSequence(t *testing.T) { - pod := generateNodes(2, 2, 4*1024*1024*1024, 10) + pod := generateNodes(2, 2, 4*types.GByte, 10) k, _ := newPotassium() - res, err := SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 1, false) + res, err := SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 1, false) assert.NoError(t, err) for _, node := range res { if node.Name == "node0" { @@ -924,7 +912,7 @@ func TestSelectMemoryNodesSequence(t *testing.T) { } refreshPod(res) - res, err = SelectMemoryNodes(k, res, 10000, 512*1024*1024, 1, false) + res, err = SelectMemoryNodes(k, res, 10000, 512*types.MByte, 1, false) assert.NoError(t, err) for _, node := range res { if node.Name == "node1" { @@ -933,27 +921,25 @@ func TestSelectMemoryNodesSequence(t *testing.T) { } refreshPod(res) - res, err = SelectMemoryNodes(k, res, 10000, 512*1024*1024, 4, false) + res, err = SelectMemoryNodes(k, res, 10000, 512*types.MByte, 4, false) assert.NoError(t, err) assert.Equal(t, res[0].Deploy, 2) assert.Equal(t, res[1].Deploy, 2) refreshPod(res) - res, err = SelectMemoryNodes(k, res, 10000, 512*1024*1024, 3, false) + res, err = SelectMemoryNodes(k, res, 10000, 512*types.MByte, 3, false) assert.NoError(t, err) assert.Equal(t, res[0].Deploy+res[1].Deploy, 3) assert.Equal(t, res[0].Deploy-res[1].Deploy, 1) refreshPod(res) - res, err = SelectMemoryNodes(k, res, 10000, 512*1024*1024, 40, false) - assert.Error(t, err) - if err != nil { - assert.Equal(t, err.Error(), "Not enough resource need: 40, vol: 7") - } + res, err = SelectMemoryNodes(k, res, 10000, 512*types.MByte, 40, false) + assert.Equal(t, types.IsDetailedErr(err), types.ErrInsufficientRes) + assert.Contains(t, err.Error(), "need: 40, vol: 7") // new round - pod = generateNodes(2, 2, 4*1024*1024*1024, 10) - res, err = SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 1, false) + pod = generateNodes(2, 2, 4*types.GByte, 10) + res, err = SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 1, false) assert.NoError(t, err) for _, node := range res { if node.Name == "node0" { @@ -961,7 +947,7 @@ func TestSelectMemoryNodesSequence(t *testing.T) { } } refreshPod(res) - res, err = SelectMemoryNodes(k, res, 10000, 512*1024*1024, 2, false) + res, err = SelectMemoryNodes(k, res, 10000, 512*types.MByte, 2, false) assert.NoError(t, err) for _, node := range res { if node.Name == "node1" { @@ -969,20 +955,20 @@ func TestSelectMemoryNodesSequence(t *testing.T) { } } refreshPod(res) - res, err = SelectMemoryNodes(k, res, 10000, 512*1024*1024, 5, false) + res, err = SelectMemoryNodes(k, res, 10000, 512*types.MByte, 5, false) assert.NoError(t, err) 
assert.Equal(t, res[0].Deploy+res[0].Count, 4) assert.Equal(t, res[1].Deploy+res[1].Count, 4) } func TestSelectMemoryNodesGiven(t *testing.T) { - pod := generateNodes(4, 2, 4*1024*1024*1024, 10) + pod := generateNodes(4, 2, 4*types.GByte, 10) for i := 0; i < 3; i++ { pod[i].Count++ } k, _ := newPotassium() - res, err := SelectMemoryNodes(k, pod, 10000, 512*1024*1024, 2, false) + res, err := SelectMemoryNodes(k, pod, 10000, 512*types.MByte, 2, false) assert.NoError(t, err) for _, node := range res { if node.Name == "n3" { diff --git a/store/etcd/container.go b/store/etcd/container.go index 49a5dc0cb..97d979dd2 100644 --- a/store/etcd/container.go +++ b/store/etcd/container.go @@ -63,8 +63,10 @@ func (k *Krypton) AddContainer(ctx context.Context, container *types.Container) // RemoveContainer remove a container // container id must be in full length func (k *Krypton) RemoveContainer(ctx context.Context, container *types.Container) error { - if len(container.ID) < 64 { - return fmt.Errorf("Container ID must be length of 64") + if l := len(container.ID); l != 64 { + return types.NewDetailedErr(types.ErrBadContainerID, + fmt.Sprintf("containerID: %s, length: %d", + container.ID, len(container.ID))) } appname, entrypoint, _, err := utils.ParseContainerName(container.Name) if err != nil { @@ -98,8 +100,9 @@ func (k *Krypton) CleanContainerData(ctx context.Context, ID, appname, entrypoin // container if must be in full length, or we can't find it in etcd // storage path in etcd is `/container/:containerid` func (k *Krypton) GetContainer(ctx context.Context, ID string) (*types.Container, error) { - if len(ID) != 64 { - return nil, fmt.Errorf("Container ID must be length of 64") + if l := len(ID); l != 64 { + return nil, types.NewDetailedErr(types.ErrBadContainerID, + fmt.Sprintf("containerID: %s, length: %d", ID, l)) } key := fmt.Sprintf(containerInfoKey, ID) @@ -108,7 +111,8 @@ func (k *Krypton) GetContainer(ctx context.Context, ID string) (*types.Container return nil, err } if resp.Node.Dir { - return nil, fmt.Errorf("Container storage path %q in etcd is a directory", key) + return nil, types.NewDetailedErr(types.ErrKeyIsDir, + fmt.Sprintf("container storage path %q in etcd is a directory", key)) } c := &types.Container{} diff --git a/store/etcd/krypton.go b/store/etcd/krypton.go index 84aeb357c..6c0d42144 100644 --- a/store/etcd/krypton.go +++ b/store/etcd/krypton.go @@ -32,7 +32,7 @@ const ( containerDeployStatusKey = "/deploy/%s/%s" containerDeployKey = "/deploy/%s/%s/%s/%s" - gigabyte = 1073741824 + nodeConnPrefixKey = "tcp://" ) //Krypton means store with etcd diff --git a/store/etcd/node.go b/store/etcd/node.go index cc21fc0b8..38fc20da9 100644 --- a/store/etcd/node.go +++ b/store/etcd/node.go @@ -20,8 +20,10 @@ import ( // AddNode save it to etcd // storage path in etcd is `/pod/:podname/node/:nodename/info` func (k *Krypton) AddNode(ctx context.Context, name, endpoint, podname, ca, cert, key string, cpu, share int, memory int64, labels map[string]string) (*types.Node, error) { - if !strings.HasPrefix(endpoint, "tcp://") { - return nil, fmt.Errorf("Endpoint must starts with tcp:// %q", endpoint) + if !strings.HasPrefix(endpoint, nodeConnPrefixKey) { + return nil, types.NewDetailedErr(types.ErrNodeFormat, + fmt.Sprintf("endpoint must starts with %s %q", + nodeConnPrefixKey, endpoint)) } _, err := k.GetPod(ctx, podname) @@ -31,7 +33,9 @@ func (k *Krypton) AddNode(ctx context.Context, name, endpoint, podname, ca, cert nodeKey := fmt.Sprintf(nodeInfoKey, podname, name) if _, err := 
k.etcd.Get(ctx, nodeKey, nil); err == nil { - return nil, fmt.Errorf("Node (%s, %s) already exists", podname, name) + return nil, types.NewDetailedErr(types.ErrNodeExist, + fmt.Sprintf("node %s:%s already exists", + podname, name)) } // 如果有tls的证书需要保存就保存一下 @@ -70,7 +74,7 @@ func (k *Krypton) AddNode(ctx context.Context, name, endpoint, podname, ca, cert ncpu = info.NCPU } if memory == 0 { - memcap = info.MemTotal - gigabyte + memcap = info.MemTotal - types.GByte } if share == 0 { share = k.config.Scheduler.ShareBase @@ -151,7 +155,8 @@ func (k *Krypton) GetNode(ctx context.Context, podname, nodename string) (*types return nil, err } if resp.Node.Dir { - return nil, fmt.Errorf("Node storage path %q in etcd is a directory", key) + return nil, types.NewDetailedErr(types.ErrKeyIsDir, + fmt.Sprintf("node storage path %q in etcd is a directory", key)) } node := &types.Node{} @@ -189,7 +194,8 @@ func (k *Krypton) GetNodesByPod(ctx context.Context, podname string) (nodes []*t return nodes, err } if !resp.Node.Dir { - return nil, fmt.Errorf("Node storage path %q in etcd is not a directory", key) + return nil, types.NewDetailedErr(types.ErrKeyIsNotDir, + fmt.Sprintf("node storage path %q in etcd is not a directory", key)) } for _, node := range resp.Node.Nodes { @@ -263,7 +269,8 @@ func (k *Krypton) UpdateNodeResource(ctx context.Context, podname, nodename stri return err } if resp.Node.Dir { - return fmt.Errorf("Node storage path %q in etcd is a directory", nodeKey) + return types.NewDetailedErr(types.ErrKeyIsDir, + fmt.Sprintf("node storage path %q in etcd is a directory", nodeKey)) } node := &types.Node{} diff --git a/store/etcd/pod.go b/store/etcd/pod.go index 4ea8bce08..a0bd707bb 100644 --- a/store/etcd/pod.go +++ b/store/etcd/pod.go @@ -21,7 +21,8 @@ func (k *Krypton) AddPod(ctx context.Context, name, favor, desc string) (*types. 
if favor == "" { favor = scheduler.MEMORY_PRIOR } else if favor != scheduler.MEMORY_PRIOR && favor != scheduler.CPU_PRIOR { - return nil, fmt.Errorf("favor should be either CPU or MEM, got %s", favor) + return nil, types.NewDetailedErr(types.ErrBadFaver, + fmt.Sprintf("got bad favor: %s", favor)) } pod := &types.Pod{Name: name, Desc: desc, Favor: favor} @@ -43,7 +44,8 @@ func (k *Krypton) GetPod(ctx context.Context, name string) (*types.Pod, error) { return nil, err } if resp.Node.Dir { - return nil, fmt.Errorf("Pod storage path %q in etcd is a directory", key) + return nil, types.NewDetailedErr(types.ErrKeyIsDir, + fmt.Sprintf("pod storage path %q in etcd is a directory", key)) } pod := &types.Pod{} @@ -59,8 +61,9 @@ func (k *Krypton) RemovePod(ctx context.Context, podname string) error { if err != nil && !client.IsKeyNotFound(err) { return err } - if len(ns) != 0 { - return fmt.Errorf("Pod %s still has %d nodes, delete them first", podname, len(ns)) + if l := len(ns); l != 0 { + return types.NewDetailedErr(types.ErrPodHasNodes, + fmt.Sprintf("pod %s still has %d nodes, delete them first", podname, l)) } _, err = k.etcd.Delete(ctx, key, &client.DeleteOptions{Dir: true, Recursive: true}) @@ -76,7 +79,8 @@ func (k *Krypton) GetAllPods(ctx context.Context) (pods []*types.Pod, err error) { return pods, err } if !resp.Node.Dir { - return nil, fmt.Errorf("Pod storage path %q in etcd is not a directory", allPodsKey) + return nil, types.NewDetailedErr(types.ErrKeyIsNotDir, + fmt.Sprintf("pod storage path %q in etcd is not a directory", allPodsKey)) } for _, node := range resp.Node.Nodes { diff --git a/store/mock/store.go b/store/mock/store.go index 6a34672c3..43b319a9c 100644 --- a/store/mock/store.go +++ b/store/mock/store.go @@ -62,8 +62,9 @@ func (m *MockStore) RemovePod(ctx context.Context, podname string) error { if err != nil { return err } - if len(nodes) != 0 { - return fmt.Errorf("Pod %s still has nodes, delete the nodes first", podname) + if l := len(nodes); l != 0 { + return types.NewDetailedErr(types.ErrPodHasNodes, + fmt.Sprintf("pod %s still has %d nodes, delete them first", podname, l)) } args := m.Called(ctx, podname) if args.Get(0) != nil { @@ -161,7 +162,7 @@ func (m *MockStore) UpdateNodeResource(ctx context.Context, podname, nodename st // GetContainer fake get container func (m *MockStore) GetContainer(ctx context.Context, ID string) (*types.Container, error) { if len(ID) != 64 { - return nil, fmt.Errorf("Container ID must be length of 64") + return nil, types.ErrBadContainerID } args := m.Called(ID) if args.Get(0) != nil { diff --git a/types/container.go b/types/container.go index 8fbe4c901..4914bdfea 100644 --- a/types/container.go +++ b/types/container.go @@ -2,7 +2,6 @@ package types import ( "context" - "fmt" "time" enginetypes "github.com/docker/docker/api/types" @@ -40,7 +39,7 @@ func (c *Container) Inspect(ctx context.Context) (enginetypes.ContainerJSON, err inspectCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() if c.Engine == nil { - return enginetypes.ContainerJSON{}, fmt.Errorf("Engine is nil") + return enginetypes.ContainerJSON{}, ErrNilEngine } return c.Engine.ContainerInspect(inspectCtx, c.ID) } diff --git a/types/errors.go b/types/errors.go new file mode 100644 index 000000000..837910c17 --- /dev/null +++ b/types/errors.go @@ -0,0 +1,73 @@ +package types + +import ( + "errors" + "fmt" +) + +// errors +var ( + ErrInsufficientCPU = errors.New("cannot alloc a plan, not enough cpu rate") + ErrInsufficientMEM = errors.New("cannot alloc
a plan, not enough memory") + ErrInsufficientCap = errors.New("cannot alloc an each-node plan, not enough capacity") + ErrInsufficientRes = errors.New("not enough resource") + ErrInsufficientNodes = errors.New("not enough nodes") + + ErrNegativeMemory = errors.New("memory must be positive") + ErrNegativeQuota = errors.New("quota must be positive") + + ErrZeroNodes = errors.New("no nodes provided to choose from") + + ErrNodeFormat = errors.New("bad endpoint name") + ErrNodeExist = errors.New("node already exists") + + ErrKeyIsDir = errors.New("key is a directory") + ErrKeyIsNotDir = errors.New("key is not a directory") + ErrKeyIsEmpty = errors.New("key is empty") + + ErrBadContainerID = errors.New("container ID must be length of 64") + ErrBadDeployMethod = errors.New("deploy method not supported yet") + ErrBadFaver = errors.New("favor should be either CPU or MEM") + ErrBadIPAddress = errors.New("bad IP address") + ErrBadPodType = errors.New("pod type not supported yet") + ErrBadSCMType = errors.New("unknown SCM type") + ErrBadMemory = errors.New("bad `Memory` value") + ErrBadCPU = errors.New("bad `CPU` value") + ErrBadCount = errors.New("bad `Count` value") + + ErrPodHasNodes = errors.New("pod has nodes") + ErrPodNoNodes = errors.New("pod has no nodes") + + ErrCannotGetEngine = errors.New("cannot get engine") + ErrNilEngine = errors.New("engine is nil") +) + +// NewDetailedErr returns an error with details +func NewDetailedErr(err error, details interface{}) error { + return &detailedErr{ + details: fmt.Sprintf("%v", details), + err: err, + } +} + +// IsDetailedErr returns the underlying error if the error +// is a detailedErr, otherwise returns it unchanged +func IsDetailedErr(err error) error { + if e, ok := err.(*detailedErr); ok { + return e.err + } + return err +} + +// detailedErr contains details +type detailedErr struct { + details string + err error +} + +func (de *detailedErr) Error() string { + if de.details != "" { + return fmt.Sprintf("%s: %s", de.err.Error(), de.details) + } + return de.err.Error() +} diff --git a/types/node.go b/types/node.go index 846c1a5c0..86499deb5 100644 --- a/types/node.go +++ b/types/node.go @@ -2,7 +2,6 @@ package types import ( "context" - "fmt" "net" "net/url" "sync" @@ -73,7 +72,7 @@ func (n *Node) Info(ctx context.Context) (enginetypes.Info, error) { infoCtx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() if n.Engine == nil { - return enginetypes.Info{}, fmt.Errorf("Node engine is nil") + return enginetypes.Info{}, ErrNilEngine } return n.Engine.Info(infoCtx) } diff --git a/types/units.go b/types/units.go new file mode 100644 index 000000000..1fd494d2a --- /dev/null +++ b/types/units.go @@ -0,0 +1,10 @@ +package types + +// size units +const ( + Byte int64 = 1 + + KByte = Byte << 10 + MByte = KByte << 10 + GByte = MByte << 10 +)
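For reference, a minimal sketch (not part of this patch) of how the new types.NewDetailedErr / types.IsDetailedErr helpers and the size constants from types/units.go compose. The checkCapacity function and its values are hypothetical, made up for illustration; only the types package API comes from the patch above.

package main

import (
	"errors"
	"fmt"

	"github.com/projecteru2/core/types"
)

// checkCapacity is a hypothetical caller that wraps the sentinel error
// with node-specific details, mirroring the pattern used in the
// scheduler and store packages in this patch.
func checkCapacity(nodeName string, memCap, need int64) error {
	if memCap < need {
		return types.NewDetailedErr(types.ErrInsufficientRes,
			fmt.Sprintf("node %s has %d bytes, need %d", nodeName, memCap, need))
	}
	return nil
}

func main() {
	err := checkCapacity("n1", 2*types.GByte, 4*types.GByte)
	// The full message keeps the details for logging.
	fmt.Println(err) // not enough resource: node n1 has 2147483648 bytes, need 4294967296
	// IsDetailedErr unwraps back to the sentinel, so callers and tests can compare directly.
	fmt.Println(types.IsDetailedErr(err) == types.ErrInsufficientRes) // true
	// A plain error passes through IsDetailedErr unchanged.
	plain := errors.New("boom")
	fmt.Println(types.IsDetailedErr(plain) == plain) // true
}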