diff --git a/cluster/calcium/build.go b/cluster/calcium/build.go index df3445744..0a7e6e997 100644 --- a/cluster/calcium/build.go +++ b/cluster/calcium/build.go @@ -25,7 +25,7 @@ func (c *Calcium) BuildImage(ctx context.Context, opts *types.BuildOptions) (ch // select nodes node, err := c.selectBuildNode(ctx) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } log.Infof("[BuildImage] Building image at pod %s node %s", node.Podname, node.Name) // get refs @@ -41,7 +41,7 @@ func (c *Calcium) BuildImage(ctx context.Context, opts *types.BuildOptions) (ch default: return nil, logger.Err(errors.WithStack(errors.New("unknown build type"))) } - return ch, logger.Err(errors.WithStack(err)) + return ch, logger.Err(err) } func (c *Calcium) selectBuildNode(ctx context.Context) (*types.Node, error) { @@ -54,14 +54,14 @@ func (c *Calcium) selectBuildNode(ctx context.Context) (*types.Node, error) { // get node by scheduler nodes, err := c.ListPodNodes(ctx, c.config.Docker.BuildPod, nil, false) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if len(nodes) == 0 { return nil, errors.WithStack(types.ErrInsufficientNodes) } // get idle max node node, err := c.scheduler.MaxIdleNode(nodes) - return node, errors.WithStack(err) + return node, err } func (c *Calcium) buildFromSCM(ctx context.Context, node *types.Node, refs []string, opts *types.BuildOptions) (chan *types.BuildImageMessage, error) { @@ -76,7 +76,7 @@ func (c *Calcium) buildFromSCM(ctx context.Context, node *types.Node, refs []str return nil, errors.WithStack(err) } ch, err := c.buildFromContent(ctx, node, refs, content) - return ch, errors.WithStack(err) + return ch, err } func (c *Calcium) buildFromContent(ctx context.Context, node *types.Node, refs []string, content io.Reader) (chan *types.BuildImageMessage, error) { @@ -85,7 +85,7 @@ func (c *Calcium) buildFromContent(ctx context.Context, node *types.Node, refs [ return nil, errors.WithStack(err) } ch, 
err := c.pushImage(ctx, resp, node, refs) - return ch, errors.WithStack(err) + return ch, err } func (c *Calcium) buildFromExist(ctx context.Context, ref, existID string) (chan *types.BuildImageMessage, error) { // nolint:unparam diff --git a/cluster/calcium/capacity.go b/cluster/calcium/capacity.go index f456765bc..586e88f4b 100644 --- a/cluster/calcium/capacity.go +++ b/cluster/calcium/capacity.go @@ -23,7 +23,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio if opts.DeployStrategy != strategy.Dummy { if _, msg.NodeCapacities, err = c.doAllocResource(ctx, nodeMap, opts); err != nil { logger.Errorf("[Calcium.CalculateCapacity] doAllocResource failed: %+v", err) - return errors.WithStack(err) + return err } for _, capacity := range msg.NodeCapacities { @@ -34,7 +34,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio msg.Total, _, infos, err = c.doCalculateCapacity(nodeMap, opts) if err != nil { logger.Errorf("[Calcium.CalculateCapacity] doCalculateCapacity failed: %+v", err) - return errors.WithStack(err) + return err } for _, info := range infos { msg.NodeCapacities[info.Nodename] = info.Capacity @@ -56,12 +56,12 @@ func (c *Calcium) doCalculateCapacity(nodeMap map[string]*types.Node, opts *type resourceRequests, err := resources.MakeRequests(opts.ResourceOpts) if err != nil { - return 0, nil, nil, errors.WithStack(err) + return 0, nil, nil, err } // select available nodes if plans, err = resources.SelectNodesByResourceRequests(resourceRequests, nodeMap); err != nil { - return 0, nil, nil, errors.WithStack(err) + return 0, nil, nil, err } log.Debugf("[Calcium.doCalculateCapacity] plans: %+v, total: %v", plans, total) diff --git a/cluster/calcium/control.go b/cluster/calcium/control.go index 69a9be6e5..f4ad4a0bf 100644 --- a/cluster/calcium/control.go +++ b/cluster/calcium/control.go @@ -30,18 +30,18 @@ func (c *Calcium) ControlWorkload(ctx context.Context, ids []string, t string, f switch t { 
case cluster.WorkloadStop: message, err = c.doStopWorkload(ctx, workload, force) - return errors.WithStack(err) + return err case cluster.WorkloadStart: message, err = c.doStartWorkload(ctx, workload, force) - return errors.WithStack(err) + return err case cluster.WorkloadRestart: message, err = c.doStopWorkload(ctx, workload, force) if err != nil { - return errors.WithStack(err) + return err } startHook, err := c.doStartWorkload(ctx, workload, force) message = append(message, startHook...) - return errors.WithStack(err) + return err } return errors.WithStack(types.ErrUnknownControlType) }) @@ -65,7 +65,7 @@ func (c *Calcium) ControlWorkload(ctx context.Context, ids []string, t string, f func (c *Calcium) doStartWorkload(ctx context.Context, workload *types.Workload, force bool) (message []*bytes.Buffer, err error) { if err = workload.Start(ctx); err != nil { - return message, errors.WithStack(err) + return message, err } // TODO healthcheck first if workload.Hook != nil && len(workload.Hook.AfterStart) > 0 { @@ -77,7 +77,7 @@ func (c *Calcium) doStartWorkload(ctx context.Context, workload *types.Workload, force, workload.Engine, ) } - return message, errors.WithStack(err) + return message, err } func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload, force bool) (message []*bytes.Buffer, err error) { @@ -90,7 +90,7 @@ func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload, force, workload.Engine, ) if err != nil { - return message, errors.WithStack(err) + return message, err } } @@ -100,5 +100,5 @@ func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload, if err = workload.Stop(ctx, force); err != nil { message = append(message, bytes.NewBufferString(err.Error())) } - return message, errors.WithStack(err) + return message, err } diff --git a/cluster/calcium/copy.go b/cluster/calcium/copy.go index 6b1eba683..527573bda 100644 --- a/cluster/calcium/copy.go +++ b/cluster/calcium/copy.go @@ -4,7 +4,6 
@@ import ( "context" "sync" - "github.com/pkg/errors" "github.com/projecteru2/core/log" "github.com/projecteru2/core/types" ) @@ -13,7 +12,7 @@ import ( func (c *Calcium) Copy(ctx context.Context, opts *types.CopyOptions) (chan *types.CopyMessage, error) { logger := log.WithField("Calcium", "Copy").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } ch := make(chan *types.CopyMessage) go func() { diff --git a/cluster/calcium/create.go b/cluster/calcium/create.go index 8f4d03a87..27fdb2a39 100644 --- a/cluster/calcium/create.go +++ b/cluster/calcium/create.go @@ -24,7 +24,7 @@ import ( func (c *Calcium) CreateWorkload(ctx context.Context, opts *types.DeployOptions) (chan *types.CreateWorkloadMessage, error) { logger := log.WithField("Calcium", "CreateWorkload").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } opts.ProcessIdent = utils.RandomString(16) @@ -97,7 +97,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio // then: deploy workloads func(ctx context.Context) (err error) { rollbackMap, err = c.doDeployWorkloads(ctx, ch, opts, plans, deployMap) - return errors.WithStack(err) + return err }, // rollback: give back resources @@ -115,7 +115,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio err = e } } - return errors.WithStack(err) + return err }, c.config.GlobalTimeout, @@ -145,7 +145,7 @@ func (c *Calcium) doDeployWorkloads(ctx context.Context, ch chan *types.CreateWo wg.Wait() log.Debugf("[Calcium.doDeployWorkloads] rollbackMap: %+v", rollbackMap) - return rollbackMap, errors.WithStack(err) + return rollbackMap, err } // deploy scheduled workloads on one node @@ -156,7 +156,7 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr for i := 0; i < deploy; 
i++ { ch <- &types.CreateWorkloadMessage{Error: logger.Err(err)} } - return utils.Range(deploy), errors.WithStack(err) + return utils.Range(deploy), err } pool, appendLock := utils.NewGoroutinePool(int(c.config.MaxConcurrency)), sync.Mutex{} @@ -194,7 +194,7 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr createMsg.ResourceMeta = *r createOpts := c.doMakeWorkloadOptions(seq+idx, createMsg, opts, node) - e = errors.WithStack(c.doDeployOneWorkload(ctx, node, opts, createMsg, createOpts, deploy-1-idx)) + e = c.doDeployOneWorkload(ctx, node, opts, createMsg, createOpts, deploy-1-idx) } }(idx)) } @@ -209,16 +209,16 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr }); err != nil { logger.Errorf("failed to lock node to remap: %v", err) } - return indices, errors.WithStack(err) + return indices, err } func (c *Calcium) doGetAndPrepareNode(ctx context.Context, nodename, image string) (*types.Node, error) { node, err := c.GetNode(ctx, nodename) if err != nil { - return nil, errors.WithStack(err) + return nil, err } - return node, errors.WithStack(pullImage(ctx, node, image)) + return node, pullImage(ctx, node, image) } // transaction: workload metadata consistency @@ -277,7 +277,7 @@ func (c *Calcium) doDeployOneWorkload( // so there's a time gap window, once the core process crashes between // VirtualizationCreate and logCreateWorkload then the worload is leaky. 
if commit, err = c.wal.logCreateWorkload(ctx, workload.ID, node.Name); err != nil { - return errors.WithStack(err) + return err } return nil }, @@ -291,7 +291,7 @@ func (c *Calcium) doDeployOneWorkload( return errors.WithStack(err) } if err = c.doSendFileToWorkload(ctx, node.Engine, workload.ID, dst, reader, true, false); err != nil { - return errors.WithStack(err) + return err } } } @@ -314,14 +314,14 @@ func (c *Calcium) doDeployOneWorkload( // start first msg.Hook, err = c.doStartWorkload(ctx, workload, opts.IgnoreHook) if err != nil { - return errors.WithStack(err) + return err } // inspect real meta var workloadInfo *enginetypes.VirtualizationInfo workloadInfo, err = workload.Inspect(ctx) // 补充静态元数据 if err != nil { - return errors.WithStack(err) + return err } // update meta @@ -357,7 +357,7 @@ func (c *Calcium) doDeployOneWorkload( if workload.ID == "" { return nil } - return errors.WithStack(c.doRemoveWorkload(ctx, workload, true)) + return c.doRemoveWorkload(ctx, workload, true) }, c.config.GlobalTimeout, ) diff --git a/cluster/calcium/dissociate.go b/cluster/calcium/dissociate.go index c1d2b5ad0..bd227359b 100644 --- a/cluster/calcium/dissociate.go +++ b/cluster/calcium/dissociate.go @@ -17,7 +17,7 @@ func (c *Calcium) DissociateWorkload(ctx context.Context, ids []string) (chan *t nodeWorkloadGroup, err := c.groupWorkloadsByNode(ctx, ids) if err != nil { logger.Errorf("failed to group workloads by node: %+v", err) - return nil, errors.WithStack(err) + return nil, err } ch := make(chan *types.DissociateWorkloadMessage) diff --git a/cluster/calcium/image.go b/cluster/calcium/image.go index 5acba79e1..1fac6e660 100644 --- a/cluster/calcium/image.go +++ b/cluster/calcium/image.go @@ -14,13 +14,13 @@ import ( func (c *Calcium) RemoveImage(ctx context.Context, opts *types.ImageOptions) (chan *types.RemoveImageMessage, error) { logger := log.WithField("Calcium", "RemoveImage").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, 
logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } opts.Normalize() nodes, err := c.getNodes(ctx, opts.Podname, opts.Nodenames, nil, false) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } if len(nodes) == 0 { @@ -77,13 +77,13 @@ func (c *Calcium) RemoveImage(ctx context.Context, opts *types.ImageOptions) (ch func (c *Calcium) CacheImage(ctx context.Context, opts *types.ImageOptions) (chan *types.CacheImageMessage, error) { logger := log.WithField("Calcium", "CacheImage").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } opts.Normalize() nodes, err := c.getNodes(ctx, opts.Podname, opts.Nodenames, nil, false) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } if len(nodes) == 0 { diff --git a/cluster/calcium/lambda.go b/cluster/calcium/lambda.go index a69d36499..7b03a3223 100644 --- a/cluster/calcium/lambda.go +++ b/cluster/calcium/lambda.go @@ -27,7 +27,7 @@ const ( func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inCh <-chan []byte) (<-chan *types.AttachWorkloadMessage, error) { logger := log.WithField("Calcium", "RunAndWait").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } opts.Lambda = true // count = 1 && OpenStdin @@ -38,12 +38,12 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC commit, err := c.walCreateLambda(ctx, opts) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } createChan, err := c.CreateWorkload(ctx, opts) if err != nil { logger.Errorf("[RunAndWait] Create workload error %+v", err) - return nil, errors.WithStack(err) + return nil, err } runMsgCh := make(chan *types.AttachWorkloadMessage) diff --git a/cluster/calcium/lock.go b/cluster/calcium/lock.go 
index 4e2289fda..d4a4928d8 100644 --- a/cluster/calcium/lock.go +++ b/cluster/calcium/lock.go @@ -39,7 +39,7 @@ func (c *Calcium) doUnlockAll(ctx context.Context, locks map[string]lock.Distrib func (c *Calcium) withWorkloadLocked(ctx context.Context, id string, f func(context.Context, *types.Workload) error) error { return c.withWorkloadsLocked(ctx, []string{id}, func(ctx context.Context, workloads map[string]*types.Workload) error { if c, ok := workloads[id]; ok { - return errors.WithStack(f(ctx, c)) + return f(ctx, c) } return errors.WithStack(types.ErrWorkloadNotExists) }) @@ -48,7 +48,7 @@ func (c *Calcium) withWorkloadLocked(ctx context.Context, id string, f func(cont func (c *Calcium) withNodeLocked(ctx context.Context, nodename string, f func(context.Context, *types.Node) error) error { return c.withNodesLocked(ctx, "", []string{nodename}, nil, true, func(ctx context.Context, nodes map[string]*types.Node) error { if n, ok := nodes[nodename]; ok { - return errors.WithStack(f(ctx, n)) + return f(ctx, n) } return errors.WithStack(types.ErrNodeNotExists) }) @@ -61,7 +61,7 @@ func (c *Calcium) withWorkloadsLocked(ctx context.Context, ids []string, f func( defer func() { c.doUnlockAll(context.Background(), locks) }() cs, err := c.GetWorkloads(ctx, ids) if err != nil { - return errors.WithStack(err) + return err } var lock lock.DistributedLock for _, workload := range cs { @@ -73,7 +73,7 @@ func (c *Calcium) withWorkloadsLocked(ctx context.Context, ids []string, f func( locks[workload.ID] = lock workloads[workload.ID] = workload } - return errors.WithStack(f(ctx, workloads)) + return f(ctx, workloads) } func (c *Calcium) withNodesLocked(ctx context.Context, podname string, nodenames []string, labels map[string]string, all bool, f func(context.Context, map[string]*types.Node) error) error { @@ -83,23 +83,23 @@ func (c *Calcium) withNodesLocked(ctx context.Context, podname string, nodenames defer c.doUnlockAll(context.Background(), locks) ns, err := c.getNodes(ctx, 
podname, nodenames, labels, all) if err != nil { - return errors.WithStack(err) + return err } var lock lock.DistributedLock for _, n := range ns { lock, ctx, err = c.doLock(ctx, fmt.Sprintf(cluster.NodeLock, podname, n.Name), c.config.LockTimeout) if err != nil { - return errors.WithStack(err) + return err } log.Debugf("[withNodesLocked] Node %s locked", n.Name) locks[n.Name] = lock // refresh node node, err := c.GetNode(ctx, n.Name) if err != nil { - return errors.WithStack(err) + return err } nodes[n.Name] = node } - return errors.WithStack(f(ctx, nodes)) + return f(ctx, nodes) } diff --git a/cluster/calcium/node.go b/cluster/calcium/node.go index 5366a0003..e99956c0d 100644 --- a/cluster/calcium/node.go +++ b/cluster/calcium/node.go @@ -13,7 +13,7 @@ import ( func (c *Calcium) AddNode(ctx context.Context, opts *types.AddNodeOptions) (*types.Node, error) { logger := log.WithField("Calcium", "AddNode").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } opts.Normalize() node, err := c.store.AddNode(ctx, opts) @@ -29,7 +29,7 @@ func (c *Calcium) RemoveNode(ctx context.Context, nodename string) error { return c.withNodeLocked(ctx, nodename, func(ctx context.Context, node *types.Node) error { ws, err := c.ListNodeWorkloads(ctx, node.Name, nil) if err != nil { - return logger.Err(errors.WithStack(err)) + return logger.Err(err) } if len(ws) > 0 { return logger.Err(errors.WithStack(types.ErrNodeNotEmpty)) @@ -58,7 +58,7 @@ func (c *Calcium) GetNode(ctx context.Context, nodename string) (*types.Node, er func (c *Calcium) SetNode(ctx context.Context, opts *types.SetNodeOptions) (*types.Node, error) { // nolint logger := log.WithField("Calcium", "SetNode").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } var n *types.Node return n, c.withNodeLocked(ctx, opts.Nodename, func(ctx 
context.Context, node *types.Node) error { @@ -166,12 +166,12 @@ func (c *Calcium) getNodes(ctx context.Context, podname string, nodenames []stri for _, nodename := range nodenames { node, err := c.GetNode(ctx, nodename) if err != nil { - return ns, errors.WithStack(err) + return ns, err } ns = append(ns, node) } } else { ns, err = c.ListPodNodes(ctx, podname, labels, all) } - return ns, errors.WithStack(err) + return ns, err } diff --git a/cluster/calcium/realloc.go b/cluster/calcium/realloc.go index b21cdbd60..de85f9dfb 100644 --- a/cluster/calcium/realloc.go +++ b/cluster/calcium/realloc.go @@ -32,9 +32,9 @@ func (c *Calcium) ReallocResource(ctx context.Context, opts *types.ReallocOption }, ) if err != nil { - return logger.Err(errors.WithStack(err)) + return logger.Err(err) } - return logger.Err(errors.WithStack(c.doReallocOnNode(ctx, workload.Nodename, workload, rrs))) + return logger.Err(c.doReallocOnNode(ctx, workload.Nodename, workload, rrs)) }) } @@ -44,7 +44,7 @@ func (c *Calcium) doReallocOnNode(ctx context.Context, nodename string, workload node.RecycleResources(&workload.ResourceMeta) plans, err := resources.SelectNodesByResourceRequests(rrs, map[string]*types.Node{node.Name: node}) if err != nil { - return errors.WithStack(err) + return err } originalWorkload := *workload @@ -60,11 +60,11 @@ func (c *Calcium) doReallocOnNode(ctx context.Context, nodename string, workload Node: node, ExistingInstance: workload, }, resourceMeta); err != nil { - return errors.WithStack(err) + return err } } - return errors.WithStack(c.doReallocWorkloadsOnInstance(ctx, node.Engine, resourceMeta, workload)) + return c.doReallocWorkloadsOnInstance(ctx, node.Engine, resourceMeta, workload) }, // then commit changes func(ctx context.Context) error { @@ -93,7 +93,7 @@ func (c *Calcium) doReallocOnNode(ctx context.Context, nodename string, workload StorageRequest: originalWorkload.StorageRequest, StorageLimit: originalWorkload.StorageLimit, } - return 
errors.WithStack(c.doReallocWorkloadsOnInstance(ctx, node.Engine, r, workload)) + return c.doReallocWorkloadsOnInstance(ctx, node.Engine, r, workload) }, c.config.GlobalTimeout, diff --git a/cluster/calcium/remove.go b/cluster/calcium/remove.go index 5187d58c4..8a64a38a7 100644 --- a/cluster/calcium/remove.go +++ b/cluster/calcium/remove.go @@ -21,7 +21,7 @@ func (c *Calcium) RemoveWorkload(ctx context.Context, ids []string, force bool, nodeWorkloadGroup, err := c.groupWorkloadsByNode(ctx, ids) if err != nil { logger.Errorf("failed to group workloads by node: %+v", err) - return nil, errors.WithStack(err) + return nil, err } ch := make(chan *types.RemoveWorkloadMessage) @@ -45,7 +45,7 @@ func (c *Calcium) RemoveWorkload(ctx context.Context, ids []string, force bool, }, // then func(ctx context.Context) (err error) { - if err = errors.WithStack(c.doRemoveWorkload(ctx, workload, force)); err != nil { + if err = c.doRemoveWorkload(ctx, workload, force); err != nil { log.Infof("[RemoveWorkload] Workload %s removed", workload.ID) } return err @@ -88,7 +88,7 @@ func (c *Calcium) doRemoveWorkload(ctx context.Context, workload *types.Workload }, // then func(ctx context.Context) error { - return errors.WithStack(workload.Remove(ctx, force)) + return workload.Remove(ctx, force) }, // rollback func(ctx context.Context, failedByCond bool) error { @@ -105,7 +105,7 @@ func (c *Calcium) doRemoveWorkload(ctx context.Context, workload *types.Workload func (c *Calcium) doRemoveWorkloadSync(ctx context.Context, ids []string) error { ch, err := c.RemoveWorkload(ctx, ids, true, 1) if err != nil { - return errors.WithStack(err) + return err } for m := range ch { diff --git a/cluster/calcium/replace.go b/cluster/calcium/replace.go index 94eb426ad..d4f372267 100644 --- a/cluster/calcium/replace.go +++ b/cluster/calcium/replace.go @@ -17,7 +17,7 @@ import ( func (c *Calcium) ReplaceWorkload(ctx context.Context, opts *types.ReplaceOptions) (chan *types.ReplaceWorkloadMessage, error) { 
logger := log.WithField("Calcium", "ReplaceWorkload").WithField("opts", opts) if err := opts.Validate(); err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } opts.Normalize() if len(opts.IDs) == 0 { @@ -29,7 +29,7 @@ func (c *Calcium) ReplaceWorkload(ctx context.Context, opts *types.ReplaceOption Appname: opts.Name, Entrypoint: opts.Entrypoint.Name, Nodename: nodename, }) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } for _, workload := range workloads { opts.IDs = append(opts.IDs, workload.ID) @@ -76,7 +76,7 @@ func (c *Calcium) ReplaceWorkload(ctx context.Context, opts *types.ReplaceOption if replaceOpts.NetworkInherit { info, err := workload.Inspect(ctx) if err != nil { - return errors.WithStack(err) + return err } else if !info.Running { return errors.WithStack(types.NewDetailedErr(types.ErrNotSupport, fmt.Sprintf("workload %s is not running, can not inherit", workload.ID), @@ -86,7 +86,7 @@ func (c *Calcium) ReplaceWorkload(ctx context.Context, opts *types.ReplaceOption log.Infof("[ReplaceWorkload] Inherit old workload network configuration mode %v", replaceOpts.Networks) } createMessage, removeMessage, err = c.doReplaceWorkload(ctx, workload, &replaceOpts, index) - return errors.WithStack(err) + return err }); err != nil { if errors.Is(err, types.ErrIgnoreWorkload) { log.Warnf("[ReplaceWorkload] ignore workload: %v", err) @@ -125,7 +125,7 @@ func (c *Calcium) doReplaceWorkload( // prepare node node, err := c.doGetAndPrepareNode(ctx, workload.Nodename, opts.Image) if err != nil { - return nil, removeMessage, errors.WithStack(err) + return nil, removeMessage, err } // 获得文件 io for src, dst := range opts.Copy { @@ -158,7 +158,7 @@ func (c *Calcium) doReplaceWorkload( // if func(ctx context.Context) (err error) { removeMessage.Hook, err = c.doStopWorkload(ctx, workload, opts.IgnoreHook) - return errors.WithStack(err) + return err }, // then func(ctx context.Context) error { @@ 
-167,13 +167,13 @@ func (c *Calcium) doReplaceWorkload( // if func(ctx context.Context) error { vco := c.doMakeReplaceWorkloadOptions(index, createMessage, &opts.DeployOptions, node, workload.ID) - return errors.WithStack(c.doDeployOneWorkload(ctx, node, &opts.DeployOptions, createMessage, vco, -1)) + return c.doDeployOneWorkload(ctx, node, &opts.DeployOptions, createMessage, vco, -1) }, // then func(ctx context.Context) (err error) { if err = c.doRemoveWorkload(ctx, workload, true); err != nil { log.Errorf("[doReplaceWorkload] the new started but the old failed to stop") - return errors.WithStack(err) + return err } removeMessage.Success = true return @@ -191,7 +191,7 @@ func (c *Calcium) doReplaceWorkload( } else { removeMessage.Hook = append(removeMessage.Hook, messages...) } - return errors.WithStack(err) + return err }, c.config.GlobalTimeout, ); err != nil { diff --git a/cluster/calcium/resource.go b/cluster/calcium/resource.go index 527af9c9c..f94118f7a 100644 --- a/cluster/calcium/resource.go +++ b/cluster/calcium/resource.go @@ -19,7 +19,7 @@ func (c *Calcium) PodResource(ctx context.Context, podname string) (*types.PodRe logger := log.WithField("Calcium", "PodResource").WithField("podname", podname) nodes, err := c.ListPodNodes(ctx, podname, nil, true) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } r := &types.PodResource{ Name: podname, @@ -28,7 +28,7 @@ func (c *Calcium) PodResource(ctx context.Context, podname string) (*types.PodRe for _, node := range nodes { nodeResource, err := c.doGetNodeResource(ctx, node.Name, false) if err != nil { - return nil, logger.Err(errors.WithStack(err)) + return nil, logger.Err(err) } r.NodesResource = append(r.NodesResource, nodeResource) } @@ -60,7 +60,7 @@ func (c *Calcium) doGetNodeResource(ctx context.Context, nodename string, fix bo return nr, c.withNodeLocked(ctx, nodename, func(ctx context.Context, node *types.Node) error { workloads, err := 
c.ListNodeWorkloads(ctx, node.Name, nil) if err != nil { - return errors.WithStack(err) + return err } nr = &types.NodeResource{ Name: node.Name, CPU: node.CPU, MemCap: node.MemCap, StorageCap: node.StorageCap, @@ -128,7 +128,7 @@ func (c *Calcium) doFixDiffResource(ctx context.Context, node *types.Node, cpus return utils.Txn(ctx, func(ctx context.Context) error { if n, err = c.GetNode(ctx, node.Name); err != nil { - return errors.WithStack(err) + return err } n.CPUUsed = cpus for i, v := range node.CPU { @@ -149,14 +149,14 @@ func (c *Calcium) doFixDiffResource(ctx context.Context, node *types.Node, cpus func (c *Calcium) doAllocResource(ctx context.Context, nodeMap map[string]*types.Node, opts *types.DeployOptions) ([]resourcetypes.ResourcePlans, map[string]int, error) { total, plans, strategyInfos, err := c.doCalculateCapacity(nodeMap, opts) if err != nil { - return nil, nil, errors.WithStack(err) + return nil, nil, err } if err := c.store.MakeDeployStatus(ctx, opts, strategyInfos); err != nil { return nil, nil, errors.WithStack(err) } deployMap, err := strategy.Deploy(opts, strategyInfos, total) if err != nil { - return nil, nil, errors.WithStack(err) + return nil, nil, err } log.Infof("[Calium.doAllocResource] deployMap: %+v", deployMap) return plans, deployMap, nil diff --git a/cluster/calcium/workload.go b/cluster/calcium/workload.go index 045527a45..50a2ce7f0 100644 --- a/cluster/calcium/workload.go +++ b/cluster/calcium/workload.go @@ -32,10 +32,10 @@ func (c *Calcium) ListNodeWorkloads(ctx context.Context, nodename string, labels func (c *Calcium) getWorkloadNode(ctx context.Context, id string) (*types.Node, error) { w, err := c.GetWorkload(ctx, id) if err != nil { - return nil, errors.WithStack(err) + return nil, err } node, err := c.GetNode(ctx, w.Nodename) - return node, errors.WithStack(err) + return node, err } // GetWorkload get a workload diff --git a/resources/scheduler.go b/resources/scheduler.go index 02611df5f..f07c9a288 100644 --- 
a/resources/scheduler.go +++ b/resources/scheduler.go @@ -1,7 +1,6 @@ package resources import ( - "github.com/pkg/errors" "github.com/projecteru2/core/log" resourcetypes "github.com/projecteru2/core/resources/types" "github.com/projecteru2/core/types" @@ -23,7 +22,7 @@ func SelectNodesByResourceRequests(resourceRequests resourcetypes.ResourceReques for _, resourceRequest := range resourceRequests { plan, _, err := resourceRequest.MakeScheduler()(scheduleInfos) if err != nil { - return plans, errors.WithStack(err) + return plans, err } plans = append(plans, plan) } diff --git a/scheduler/complex/cpu.go b/scheduler/complex/cpu.go index 8daa06e0c..67909bd21 100644 --- a/scheduler/complex/cpu.go +++ b/scheduler/complex/cpu.go @@ -8,6 +8,7 @@ import ( "math" "sort" + "github.com/pkg/errors" "github.com/projecteru2/core/log" resourcetypes "github.com/projecteru2/core/resources/types" "github.com/projecteru2/core/types" @@ -79,7 +80,7 @@ func cpuPriorPlan(cpu float64, memory int64, scheduleInfos []resourcetypes.Sched sort.Slice(scheduleInfos, func(i, j int) bool { return scheduleInfos[i].Capacity < scheduleInfos[j].Capacity }) p := sort.Search(len(scheduleInfos), func(i int) bool { return scheduleInfos[i].Capacity > 0 }) if p == len(scheduleInfos) { - return nil, nil, 0, types.ErrInsufficientRes + return nil, nil, 0, errors.WithStack(types.ErrInsufficientRes) } return scheduleInfos[p:], nodeWorkload, volTotal, nil diff --git a/scheduler/complex/potassium.go b/scheduler/complex/potassium.go index 6994642cb..67daec762 100644 --- a/scheduler/complex/potassium.go +++ b/scheduler/complex/potassium.go @@ -303,7 +303,7 @@ func (m *Potassium) SelectVolumeNodes(scheduleInfos []resourcetypes.ScheduleInfo sort.Slice(scheduleInfos, func(i, j int) bool { return scheduleInfos[i].Capacity < scheduleInfos[j].Capacity }) p := sort.Search(len(scheduleInfos), func(i int) bool { return scheduleInfos[i].Capacity > 0 }) if p == 
len(scheduleInfos) { - return nil, nil, 0, types.ErrInsufficientRes + return nil, nil, 0, errors.WithStack(types.ErrInsufficientRes) } return scheduleInfos[p:], volumePlans, volTotal, nil diff --git a/types/options.go b/types/options.go index 110b0dea9..13edef101 100644 --- a/types/options.go +++ b/types/options.go @@ -1,5 +1,7 @@ package types +import "github.com/pkg/errors" + // TODO should validate options // DeployOptions is options for deploying @@ -34,16 +36,16 @@ type DeployOptions struct { // Validate checks options func (o *DeployOptions) Validate() error { if o.Name == "" { - return ErrEmptyAppName + return errors.WithStack(ErrEmptyAppName) } if o.Podname == "" { - return ErrEmptyPodName + return errors.WithStack(ErrEmptyPodName) } if o.Image == "" { - return ErrEmptyImage + return errors.WithStack(ErrEmptyImage) } if o.Count == 0 { - return ErrEmptyCount + return errors.WithStack(ErrEmptyCount) } return o.Entrypoint.Validate() } @@ -56,7 +58,7 @@ type CopyOptions struct { // Validate checks options func (o *CopyOptions) Validate() error { if len(o.Targets) == 0 { - return ErrNoFilesToCopy + return errors.WithStack(ErrNoFilesToCopy) } return nil } @@ -70,10 +72,10 @@ type SendOptions struct { // Validate checks options func (o *SendOptions) Validate() error { if len(o.IDs) == 0 { - return ErrNoWorkloadIDs + return errors.WithStack(ErrNoWorkloadIDs) } if len(o.Data) == 0 { - return ErrNoFilesToSend + return errors.WithStack(ErrNoFilesToSend) } return nil } @@ -101,7 +103,7 @@ type ReplaceOptions struct { // to keep the original behavior, no check here.
func (o *ReplaceOptions) Validate() error { if o.DeployOptions.Name == "" { - return ErrEmptyAppName + return errors.WithStack(ErrEmptyAppName) } return o.DeployOptions.Entrypoint.Validate() } @@ -134,13 +136,13 @@ type AddNodeOptions struct { // Validate checks options func (o *AddNodeOptions) Validate() error { if o.Nodename == "" { - return ErrEmptyNodeName + return errors.WithStack(ErrEmptyNodeName) } if o.Podname == "" { - return ErrEmptyPodName + return errors.WithStack(ErrEmptyPodName) } if o.Endpoint == "" { - return ErrEmptyNodeEndpoint + return errors.WithStack(ErrEmptyNodeEndpoint) } return nil } @@ -167,7 +169,7 @@ type SetNodeOptions struct { // Validate checks options func (o *SetNodeOptions) Validate() error { if o.Nodename == "" { - return ErrEmptyNodeName + return errors.WithStack(ErrEmptyNodeName) } return nil } @@ -195,7 +197,7 @@ type ImageOptions struct { // Validate checks the options func (o *ImageOptions) Validate() error { if o.Podname == "" { - return ErrEmptyPodName + return errors.WithStack(ErrEmptyPodName) } return nil } diff --git a/types/specs.go b/types/specs.go index 25133cbd7..94f186179 100644 --- a/types/specs.go +++ b/types/specs.go @@ -2,6 +2,8 @@ package types import ( "strings" + + "github.com/pkg/errors" ) // Hook define hooks @@ -36,10 +38,10 @@ type Entrypoint struct { // Validate checks entrypoint's name func (e *Entrypoint) Validate() error { if e.Name == "" { - return ErrEmptyEntrypointName + return errors.WithStack(ErrEmptyEntrypointName) } if strings.Contains(e.Name, "_") { - return ErrUnderlineInEntrypointName + return errors.WithStack(ErrUnderlineInEntrypointName) } return nil }