Commit f67f8af

keep error log stack concise

jschwinger233 authored and CMGS committed Mar 11, 2021
1 parent 1ee5e6f commit f67f8af

Showing 20 changed files with 104 additions and 101 deletions.
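All of the files follow one pattern: github.com/pkg/errors attaches a stack trace the first time an error is created or wrapped, so calling errors.WithStack again at every intermediate return nests a second and third trace onto the same error, and a single %+v log line balloons accordingly. The commit drops the redundant re-wraps and keeps WithStack only where an error actually originates. A minimal standalone sketch of the effect (illustrative only, not code from this repository):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// origin returns an error that already carries a stack trace:
// pkg/errors records the call stack inside errors.New.
func origin() error {
	return errors.New("boom")
}

// noisy re-wraps on every hop: the pattern this commit removes.
// WithStack attaches a second stack trace on top of the first.
func noisy() error {
	if err := origin(); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

// concise just propagates the error; the stack from origin() survives.
func concise() error {
	return origin()
}

func main() {
	fmt.Printf("noisy:\n%+v\n\n", noisy())   // prints the stack twice
	fmt.Printf("concise:\n%+v\n", concise()) // prints the stack once
}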
12 changes: 6 additions & 6 deletions cluster/calcium/build.go

@@ -25,7 +25,7 @@ func (c *Calcium) BuildImage(ctx context.Context, opts *types.BuildOptions) (ch
 	// select nodes
 	node, err := c.selectBuildNode(ctx)
 	if err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}
 	log.Infof("[BuildImage] Building image at pod %s node %s", node.Podname, node.Name)
 	// get refs
@@ -41,7 +41,7 @@ func (c *Calcium) BuildImage(ctx context.Context, opts *types.BuildOptions) (ch
 	default:
 		return nil, logger.Err(errors.WithStack(errors.New("unknown build type")))
 	}
-	return ch, logger.Err(errors.WithStack(err))
+	return ch, logger.Err(err)
 }

 func (c *Calcium) selectBuildNode(ctx context.Context) (*types.Node, error) {
@@ -54,14 +54,14 @@ func (c *Calcium) selectBuildNode(ctx context.Context) (*types.Node, error) {
 	// get node by scheduler
 	nodes, err := c.ListPodNodes(ctx, c.config.Docker.BuildPod, nil, false)
 	if err != nil {
-		return nil, errors.WithStack(err)
+		return nil, err
 	}
 	if len(nodes) == 0 {
 		return nil, errors.WithStack(types.ErrInsufficientNodes)
 	}
 	// get idle max node
 	node, err := c.scheduler.MaxIdleNode(nodes)
-	return node, errors.WithStack(err)
+	return node, err
 }

 func (c *Calcium) buildFromSCM(ctx context.Context, node *types.Node, refs []string, opts *types.BuildOptions) (chan *types.BuildImageMessage, error) {
@@ -76,7 +76,7 @@ func (c *Calcium) buildFromSCM(ctx context.Context, node *types.Node, refs []str
 		return nil, errors.WithStack(err)
 	}
 	ch, err := c.buildFromContent(ctx, node, refs, content)
-	return ch, errors.WithStack(err)
+	return ch, err
 }

 func (c *Calcium) buildFromContent(ctx context.Context, node *types.Node, refs []string, content io.Reader) (chan *types.BuildImageMessage, error) {
@@ -85,7 +85,7 @@ func (c *Calcium) buildFromContent(ctx context.Context, node *types.Node, refs [
 		return nil, errors.WithStack(err)
 	}
 	ch, err := c.pushImage(ctx, resp, node, refs)
-	return ch, errors.WithStack(err)
+	return ch, err
 }

 func (c *Calcium) buildFromExist(ctx context.Context, ref, existID string) (chan *types.BuildImageMessage, error) { // nolint:unparam
8 changes: 4 additions & 4 deletions cluster/calcium/capacity.go

@@ -23,7 +23,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio
 	if opts.DeployStrategy != strategy.Dummy {
 		if _, msg.NodeCapacities, err = c.doAllocResource(ctx, nodeMap, opts); err != nil {
 			logger.Errorf("[Calcium.CalculateCapacity] doAllocResource failed: %+v", err)
-			return errors.WithStack(err)
+			return err
 		}

 		for _, capacity := range msg.NodeCapacities {
@@ -34,7 +34,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio
 		msg.Total, _, infos, err = c.doCalculateCapacity(nodeMap, opts)
 		if err != nil {
 			logger.Errorf("[Calcium.CalculateCapacity] doCalculateCapacity failed: %+v", err)
-			return errors.WithStack(err)
+			return err
 		}
 		for _, info := range infos {
 			msg.NodeCapacities[info.Nodename] = info.Capacity
@@ -56,12 +56,12 @@ func (c *Calcium) doCalculateCapacity(nodeMap map[string]*types.Node, opts *type

 	resourceRequests, err := resources.MakeRequests(opts.ResourceOpts)
 	if err != nil {
-		return 0, nil, nil, errors.WithStack(err)
+		return 0, nil, nil, err
 	}

 	// select available nodes
 	if plans, err = resources.SelectNodesByResourceRequests(resourceRequests, nodeMap); err != nil {
-		return 0, nil, nil, errors.WithStack(err)
+		return 0, nil, nil, err
 	}
 	log.Debugf("[Calcium.doCalculateCapacity] plans: %+v, total: %v", plans, total)

16 changes: 8 additions & 8 deletions cluster/calcium/control.go

@@ -30,18 +30,18 @@ func (c *Calcium) ControlWorkload(ctx context.Context, ids []string, t string, f
 			switch t {
 			case cluster.WorkloadStop:
 				message, err = c.doStopWorkload(ctx, workload, force)
-				return errors.WithStack(err)
+				return err
 			case cluster.WorkloadStart:
 				message, err = c.doStartWorkload(ctx, workload, force)
-				return errors.WithStack(err)
+				return err
 			case cluster.WorkloadRestart:
 				message, err = c.doStopWorkload(ctx, workload, force)
 				if err != nil {
-					return errors.WithStack(err)
+					return err
 				}
 				startHook, err := c.doStartWorkload(ctx, workload, force)
 				message = append(message, startHook...)
-				return errors.WithStack(err)
+				return err
 			}
 			return errors.WithStack(types.ErrUnknownControlType)
 		})
@@ -65,7 +65,7 @@ func (c *Calcium) ControlWorkload(ctx context.Context, ids []string, t string, f

 func (c *Calcium) doStartWorkload(ctx context.Context, workload *types.Workload, force bool) (message []*bytes.Buffer, err error) {
 	if err = workload.Start(ctx); err != nil {
-		return message, errors.WithStack(err)
+		return message, err
 	}
 	// TODO healthcheck first
 	if workload.Hook != nil && len(workload.Hook.AfterStart) > 0 {
@@ -77,7 +77,7 @@ func (c *Calcium) doStartWorkload(ctx context.Context, workload *types.Workload,
 			force, workload.Engine,
 		)
 	}
-	return message, errors.WithStack(err)
+	return message, err
 }

 func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload, force bool) (message []*bytes.Buffer, err error) {
@@ -90,7 +90,7 @@ func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload,
 			force, workload.Engine,
 		)
 		if err != nil {
-			return message, errors.WithStack(err)
+			return message, err
 		}
 	}

@@ -100,5 +100,5 @@ func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload,
 	if err = workload.Stop(ctx, force); err != nil {
 		message = append(message, bytes.NewBufferString(err.Error()))
 	}
-	return message, errors.WithStack(err)
+	return message, err
 }
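Note the asymmetry kept in ControlWorkload above: the propagating returns lose their WithStack, yet return errors.WithStack(types.ErrUnknownControlType) keeps it, because a package-level sentinel carries no useful stack until the place it is first returned. A standalone sketch of that idiom (names assumed for illustration, not from this repository):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

// A package-level sentinel: comparable with errors.Is, but any stack it
// captured at init time would point at package loading, not at the failure.
var ErrUnknownControlType = stderrors.New("unknown control type")

func control(t string) error {
	switch t {
	case "stop", "start", "restart":
		return nil // real handling elided
	}
	// This return is the sentinel's effective origin, so WithStack is
	// kept here to record where the bad control type actually showed up.
	return errors.WithStack(ErrUnknownControlType)
}

func main() {
	fmt.Printf("%+v\n", control("pause")) // one stack trace, pointing at control()
}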
3 changes: 1 addition & 2 deletions cluster/calcium/copy.go

@@ -4,7 +4,6 @@ import (
 	"context"
 	"sync"

-	"github.com/pkg/errors"
 	"github.com/projecteru2/core/log"
 	"github.com/projecteru2/core/types"
 )
@@ -13,7 +12,7 @@ import (
 func (c *Calcium) Copy(ctx context.Context, opts *types.CopyOptions) (chan *types.CopyMessage, error) {
 	logger := log.WithField("Calcium", "Copy").WithField("opts", opts)
 	if err := opts.Validate(); err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}
 	ch := make(chan *types.CopyMessage)
 	go func() {
28 changes: 14 additions & 14 deletions cluster/calcium/create.go

@@ -24,7 +24,7 @@ import (
 func (c *Calcium) CreateWorkload(ctx context.Context, opts *types.DeployOptions) (chan *types.CreateWorkloadMessage, error) {
 	logger := log.WithField("Calcium", "CreateWorkload").WithField("opts", opts)
 	if err := opts.Validate(); err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}

 	opts.ProcessIdent = utils.RandomString(16)
@@ -97,7 +97,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
 		// then: deploy workloads
 		func(ctx context.Context) (err error) {
 			rollbackMap, err = c.doDeployWorkloads(ctx, ch, opts, plans, deployMap)
-			return errors.WithStack(err)
+			return err
 		},

 		// rollback: give back resources
@@ -115,7 +115,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
 					err = e
 				}
 			}
-			return errors.WithStack(err)
+			return err
 		},

 		c.config.GlobalTimeout,
@@ -145,7 +145,7 @@ func (c *Calcium) doDeployWorkloads(ctx context.Context, ch chan *types.CreateWo

 	wg.Wait()
 	log.Debugf("[Calcium.doDeployWorkloads] rollbackMap: %+v", rollbackMap)
-	return rollbackMap, errors.WithStack(err)
+	return rollbackMap, err
 }

 // deploy scheduled workloads on one node
@@ -156,7 +156,7 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr
 		for i := 0; i < deploy; i++ {
 			ch <- &types.CreateWorkloadMessage{Error: logger.Err(err)}
 		}
-		return utils.Range(deploy), errors.WithStack(err)
+		return utils.Range(deploy), err
 	}

 	pool, appendLock := utils.NewGoroutinePool(int(c.config.MaxConcurrency)), sync.Mutex{}
@@ -194,7 +194,7 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr

 			createMsg.ResourceMeta = *r
 			createOpts := c.doMakeWorkloadOptions(seq+idx, createMsg, opts, node)
-			e = errors.WithStack(c.doDeployOneWorkload(ctx, node, opts, createMsg, createOpts, deploy-1-idx))
+			e = c.doDeployOneWorkload(ctx, node, opts, createMsg, createOpts, deploy-1-idx)
 		}
 	}(idx))
 }
@@ -209,16 +209,16 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr
 	}); err != nil {
 		logger.Errorf("failed to lock node to remap: %v", err)
 	}
-	return indices, errors.WithStack(err)
+	return indices, err
 }

 func (c *Calcium) doGetAndPrepareNode(ctx context.Context, nodename, image string) (*types.Node, error) {
 	node, err := c.GetNode(ctx, nodename)
 	if err != nil {
-		return nil, errors.WithStack(err)
+		return nil, err
 	}

-	return node, errors.WithStack(pullImage(ctx, node, image))
+	return node, pullImage(ctx, node, image)
 }

 // transaction: workload metadata consistency
@@ -277,7 +277,7 @@ func (c *Calcium) doDeployOneWorkload(
 			// so there's a time gap window, once the core process crashes between
 			// VirtualizationCreate and logCreateWorkload then the worload is leaky.
 			if commit, err = c.wal.logCreateWorkload(ctx, workload.ID, node.Name); err != nil {
-				return errors.WithStack(err)
+				return err
 			}
 			return nil
 		},
@@ -291,7 +291,7 @@ func (c *Calcium) doDeployOneWorkload(
 					return errors.WithStack(err)
 				}
 				if err = c.doSendFileToWorkload(ctx, node.Engine, workload.ID, dst, reader, true, false); err != nil {
-					return errors.WithStack(err)
+					return err
 				}
 			}
 		}
@@ -314,14 +314,14 @@ func (c *Calcium) doDeployOneWorkload(
 			// start first
 			msg.Hook, err = c.doStartWorkload(ctx, workload, opts.IgnoreHook)
 			if err != nil {
-				return errors.WithStack(err)
+				return err
 			}

 			// inspect real meta
 			var workloadInfo *enginetypes.VirtualizationInfo
 			workloadInfo, err = workload.Inspect(ctx) // fill in static metadata
 			if err != nil {
-				return errors.WithStack(err)
+				return err
 			}

 			// update meta
@@ -357,7 +357,7 @@ func (c *Calcium) doDeployOneWorkload(
 			if workload.ID == "" {
 				return nil
 			}
-			return errors.WithStack(c.doRemoveWorkload(ctx, workload, true))
+			return c.doRemoveWorkload(ctx, workload, true)
 		},
 		c.config.GlobalTimeout,
 	)
2 changes: 1 addition & 1 deletion cluster/calcium/dissociate.go

@@ -17,7 +17,7 @@ func (c *Calcium) DissociateWorkload(ctx context.Context, ids []string) (chan *t
 	nodeWorkloadGroup, err := c.groupWorkloadsByNode(ctx, ids)
 	if err != nil {
 		logger.Errorf("failed to group workloads by node: %+v", err)
-		return nil, errors.WithStack(err)
+		return nil, err
 	}

 	ch := make(chan *types.DissociateWorkloadMessage)
8 changes: 4 additions & 4 deletions cluster/calcium/image.go

@@ -14,13 +14,13 @@ import (
 func (c *Calcium) RemoveImage(ctx context.Context, opts *types.ImageOptions) (chan *types.RemoveImageMessage, error) {
 	logger := log.WithField("Calcium", "RemoveImage").WithField("opts", opts)
 	if err := opts.Validate(); err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}
 	opts.Normalize()

 	nodes, err := c.getNodes(ctx, opts.Podname, opts.Nodenames, nil, false)
 	if err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}

 	if len(nodes) == 0 {
@@ -77,13 +77,13 @@ func (c *Calcium) RemoveImage(ctx context.Context, opts *types.ImageOptions) (ch
 func (c *Calcium) CacheImage(ctx context.Context, opts *types.ImageOptions) (chan *types.CacheImageMessage, error) {
 	logger := log.WithField("Calcium", "CacheImage").WithField("opts", opts)
 	if err := opts.Validate(); err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}
 	opts.Normalize()

 	nodes, err := c.getNodes(ctx, opts.Podname, opts.Nodenames, nil, false)
 	if err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}

 	if len(nodes) == 0 {
6 changes: 3 additions & 3 deletions cluster/calcium/lambda.go

@@ -27,7 +27,7 @@ const (
 func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inCh <-chan []byte) (<-chan *types.AttachWorkloadMessage, error) {
 	logger := log.WithField("Calcium", "RunAndWait").WithField("opts", opts)
 	if err := opts.Validate(); err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}
 	opts.Lambda = true
 	// count = 1 && OpenStdin
@@ -38,12 +38,12 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC

 	commit, err := c.walCreateLambda(ctx, opts)
 	if err != nil {
-		return nil, logger.Err(errors.WithStack(err))
+		return nil, logger.Err(err)
 	}
 	createChan, err := c.CreateWorkload(ctx, opts)
 	if err != nil {
 		logger.Errorf("[RunAndWait] Create workload error %+v", err)
-		return nil, errors.WithStack(err)
+		return nil, err
 	}

 	runMsgCh := make(chan *types.AttachWorkloadMessage)
(remaining 12 changed files not shown)
