
Commit

remove useless error output in log
CMGS committed Nov 1, 2022
1 parent 5e76472 commit 269ad1b
Showing 78 changed files with 254 additions and 247 deletions.
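Every hunk below follows the same pattern: the log/logger wrappers already take the error as a dedicated argument, so interpolating err into the format string printed it twice. The fix drops the redundant verb and trailing err argument, and downgrades Errorf to Error where no format arguments remain. A minimal sketch of the convention follows; the wrapper signatures are assumed from the call sites in this diff, not verified against the log package itself.

package example

import (
	"context"
	"errors"

	// Logging wrapper used throughout this repo; the signatures assumed
	// below are inferred from the call sites in this commit.
	"github.com/projecteru2/core/log"
)

func connect(ctx context.Context, addr string) error {
	err := errors.New("connection refused") // placeholder failure for illustration

	// Before: err is both the structured argument and interpolated into the
	// format string, so it shows up twice in the log line.
	log.Errorf(ctx, err, "[ClientPool] connect to %s failed, err: %s", addr, err)

	// After: err stays only as the structured argument; the message keeps the
	// remaining context (here, the address).
	log.Errorf(ctx, err, "[ClientPool] connect to %s failed", addr)

	// When no format arguments remain, switch from Errorf to Error.
	log.Error(ctx, err, "[ClientPool] connect failed")

	return err
}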
4 changes: 2 additions & 2 deletions client/clientpool.go
@@ -42,7 +42,7 @@ func NewCoreRPCClientPool(ctx context.Context, config *PoolConfig) (*Pool, error
rpc, err = NewClient(ctx, addr, config.Auth)
})
if err != nil {
log.Errorf(ctx, err, "[NewCoreRPCClientPool] connect to %s failed, err: %s", addr, err)
log.Errorf(ctx, err, "[NewCoreRPCClientPool] connect to %s failed", addr)
continue
}
rpcClient := rpc.GetRPCClient()
@@ -96,7 +96,7 @@ func checkAlive(ctx context.Context, rpc *clientWithStatus, timeout time.Duratio
_, err = rpc.client.Info(ctx, &pb.Empty{})
})
if err != nil {
log.Errorf(ctx, err, "[ClientPool] connect to %s failed, err: %s", rpc.addr, err)
log.Errorf(ctx, err, "[ClientPool] connect to %s failed", rpc.addr)
return false
}
log.Debugf(ctx, "[ClientPool] connect to %s success", rpc.addr)
4 changes: 2 additions & 2 deletions client/resolver/eru/resolver.go
@@ -51,13 +51,13 @@ func (r *Resolver) sync() {

ch, err := r.discovery.Watch(ctx)
if err != nil {
log.Errorf(ctx, err, "[EruResolver] failed to watch service status: %v", err)
log.Error(ctx, err, "[EruResolver] failed to watch service status")
return
}
for {
select {
case <-ctx.Done():
log.Errorf(ctx, ctx.Err(), "[EruResolver] watch interrupted: %v", ctx.Err())
log.Error(ctx, ctx.Err(), "[EruResolver] watch interrupted")
return
case endpoints, ok := <-ch:
if !ok {
6 changes: 3 additions & 3 deletions client/servicediscovery/eru_service_discovery.go
@@ -34,7 +34,7 @@ func New(endpoint string, authConfig types.AuthConfig) *EruServiceDiscovery {
func (w *EruServiceDiscovery) Watch(ctx context.Context) (_ <-chan []string, err error) {
cc, err := w.dial(ctx, w.endpoint, w.authConfig)
if err != nil {
log.Errorf(ctx, err, "[EruServiceWatch] dial failed: %v", err)
log.Error(ctx, err, "[EruServiceWatch] dial failed")
return
}
client := pb.NewCoreRPCClient(cc)
@@ -48,7 +48,7 @@ func (w *EruServiceDiscovery) Watch(ctx context.Context) (_ <-chan []string, err
watchCtx, cancelWatch := context.WithCancel(ctx)
stream, err := client.WatchServiceStatus(watchCtx, &pb.Empty{})
if err != nil {
log.Errorf(ctx, err, "[EruServiceWatch] watch failed, try later: %v", err)
log.Error(ctx, err, "[EruServiceWatch] watch failed, try later")
time.Sleep(10 * time.Second)
continue
}
@@ -69,7 +69,7 @@ func (w *EruServiceDiscovery) Watch(ctx context.Context) (_ <-chan []string, err
status, err := stream.Recv()
close(cancelTimer)
if err != nil {
log.Errorf(ctx, err, "[EruServiceWatch] recv failed: %v", err)
log.Error(ctx, err, "[EruServiceWatch] recv failed")
break
}
expectedInterval = time.Duration(status.GetIntervalInSecond())
2 changes: 1 addition & 1 deletion client/utils/servicepusher.go
@@ -111,7 +111,7 @@ func (p *EndpointPusher) pollReachability(ctx context.Context, endpoint string)
func (p *EndpointPusher) checkReachability(host string) (err error) {
pinger, err := ping.NewPinger(host)
if err != nil {
log.Errorf(nil, err, "[EruResolver] failed to create pinger: %+v", err) //nolint
log.Error(nil, err, "[EruResolver] failed to create pinger") //nolint
return
}
pinger.SetPrivileged(os.Getuid() == 0)
14 changes: 7 additions & 7 deletions cluster/calcium/build.go
@@ -9,6 +9,7 @@ import (
"os"
"time"

"github.com/pkg/errors"
enginetypes "github.com/projecteru2/core/engine/types"
"github.com/projecteru2/core/log"
"github.com/projecteru2/core/types"
@@ -140,28 +141,27 @@ func (c *Calcium) pushImageAndClean(ctx context.Context, resp io.ReadCloser, nod
lastMessage := &types.BuildImageMessage{}
for {
message := &types.BuildImageMessage{}
err := decoder.Decode(message)
if err != nil {
if err := decoder.Decode(message); err != nil {
if err == io.EOF {
break
}
if err == context.Canceled || err == context.DeadlineExceeded {
log.Errorf(ctx, err, "[BuildImage] context timeout")
log.Error(ctx, err, "[BuildImage] context timeout")
lastMessage.ErrorDetail.Code = -1
lastMessage.ErrorDetail.Message = err.Error()
lastMessage.Error = err.Error()
break
}
malformed, _ := io.ReadAll(decoder.Buffered()) // TODO err check
logger.Errorf(ctx, nil, "[BuildImage] Decode build image message failed %+v, buffered: %v", err, malformed)
logger.Errorf(ctx, err, "[BuildImage] Decode build image message failed, buffered: %v", malformed)
return
}
ch <- message
lastMessage = message
}

if lastMessage.Error != "" {
log.Errorf(ctx, nil, "[BuildImage] Build image failed %v", lastMessage.ErrorDetail.Message)
logger.Errorf(ctx, errors.New(lastMessage.Error), "[BuildImage] Build image failed %v", lastMessage.ErrorDetail.Message)
return
}

@@ -216,11 +216,11 @@ func cleanupNodeImages(ctx context.Context, node *types.Node, ids []string, ttl
defer cancel()
for _, id := range ids {
if _, err := node.Engine.ImageRemove(ctx, id, false, true); err != nil {
logger.Errorf(ctx, err, "[BuildImage] Remove image error: %+v", err)
logger.Error(ctx, err, "[BuildImage] Remove image error")
}
}
if spaceReclaimed, err := node.Engine.ImageBuildCachePrune(ctx, true); err != nil {
logger.Errorf(ctx, err, "[BuildImage] Remove build image cache error: %+v", err)
logger.Error(ctx, err, "[BuildImage] Remove build image cache error")
} else {
logger.Infof(ctx, "[BuildImage] Clean cached image and release space %d", spaceReclaimed)
}
6 changes: 3 additions & 3 deletions cluster/calcium/calcium.go
@@ -55,7 +55,7 @@ func New(ctx context.Context, config types.Config, t *testing.T) (*Calcium, erro
log.Warn(ctx, "[Calcium] SCM not set, build API disabled")
}
if err != nil {
log.Errorf(ctx, err, "[Calcium] SCM failed: %+v", err)
log.Error(ctx, err, "[Calcium] SCM failed")
return nil, err
}

@@ -72,12 +72,12 @@ func New(ctx context.Context, config types.Config, t *testing.T) (*Calcium, erro
// load internal plugins
cpumem, err := cpumem.NewPlugin(config)
if err != nil {
log.Errorf(ctx, err, "[NewPluginManager] new cpumem plugin error: %v", err)
log.Error(ctx, err, "[NewPluginManager] new cpumem plugin error")
return nil, err
}
volume, err := volume.NewPlugin(config)
if err != nil {
log.Errorf(ctx, err, "[NewPluginManager] new volume plugin error: %v", err)
log.Error(ctx, err, "[NewPluginManager] new volume plugin error")
return nil, err
}
rmgr.AddPlugins(cpumem, volume)
4 changes: 2 additions & 2 deletions cluster/calcium/capacity.go
@@ -28,7 +28,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio

if opts.DeployStrategy != strategy.Dummy {
if msg.NodeCapacities, err = c.doGetDeployStrategy(ctx, nodenames, opts); err != nil {
logger.Errorf(ctx, err, "[Calcium.CalculateCapacity] doGetDeployMap failed: %+v", err)
logger.Error(ctx, err, "[Calcium.CalculateCapacity] doGetDeployMap failed")
return err
}

@@ -41,7 +41,7 @@
var infos map[string]*resources.NodeCapacityInfo
infos, msg.Total, err = c.rmgr.GetNodesDeployCapacity(ctx, nodenames, opts.ResourceOpts)
if err != nil {
logger.Errorf(ctx, err, "[Calcium.CalculateCapacity] failed to get nodes capacity: %+v", err)
logger.Error(ctx, err, "[Calcium.CalculateCapacity] failed to get nodes capacity")
return err
}
if msg.Total <= 0 {
12 changes: 6 additions & 6 deletions cluster/calcium/create.go
@@ -62,7 +62,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
for nodename := range deployMap {
processing := opts.GetProcessing(nodename)
if err := c.store.DeleteProcessing(cctx, processing); err != nil {
logger.Errorf(ctx, err, "[Calcium.doCreateWorkloads] delete processing failed for %s: %+v", nodename, err)
logger.Errorf(ctx, err, "[Calcium.doCreateWorkloads] delete processing failed for %s", nodename)
}
}
close(ch)
@@ -73,7 +73,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
defer func() {
if resourceCommit != nil {
if err := resourceCommit(); err != nil {
logger.Errorf(ctx, err, "commit wal failed: %s, %+v", eventWorkloadResourceAllocated, err)
logger.Errorf(ctx, err, "commit wal failed: %s", eventWorkloadResourceAllocated)
}
}
}()
@@ -85,7 +85,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
continue
}
if err := processingCommits[nodename](); err != nil {
logger.Errorf(ctx, err, "commit wal failed: %s, %s, %+v", eventProcessingCreated, nodename, err)
logger.Errorf(ctx, err, "commit wal failed: %s, %s", eventProcessingCreated, nodename)
}
}
}()
@@ -186,7 +186,7 @@ func (c *Calcium) doDeployWorkloads(ctx context.Context,
for nodename, deploy := range deployMap {
_ = c.pool.Invoke(func(deploy int) func() {
return func() {
metrics.Client.SendDeployCount(deploy)
metrics.Client.SendDeployCount(ctx, deploy)
}
}(deploy))
_ = c.pool.Invoke(func(nodename string, deploy, seq int) func() {
@@ -322,7 +322,7 @@ func (c *Calcium) doDeployOneWorkload(
defer func() {
if commit != nil {
if err := commit(); err != nil {
logger.Errorf(ctx, err, "Commit WAL %s failed: %+v", eventWorkloadCreated, err)
logger.Errorf(ctx, err, "Commit WAL %s failed", eventWorkloadCreated)
}
}
}()
@@ -427,7 +427,7 @@ func (c *Calcium) doDeployOneWorkload(

// remove workload
func(ctx context.Context, _ bool) error {
logger.Errorf(ctx, nil, "[doDeployOneWorkload] failed to deploy workload %s, rollback", workload.ID)
logger.Infof(ctx, "[doDeployOneWorkload] failed to deploy workload %s, rollback", workload.ID)
if workload.ID == "" {
return nil
}
6 changes: 3 additions & 3 deletions cluster/calcium/dissociate.go
@@ -15,7 +15,7 @@ func (c *Calcium) DissociateWorkload(ctx context.Context, ids []string) (chan *t

nodeWorkloadGroup, err := c.groupWorkloadsByNode(ctx, ids)
if err != nil {
logger.Errorf(ctx, err, "failed to group workloads by node: %+v", err)
logger.Error(ctx, err, "failed to group workloads by node")
return nil, err
}

@@ -58,15 +58,15 @@ func (c *Calcium) DissociateWorkload(ctx context.Context, ids []string) (chan *t
c.config.GlobalTimeout,
)
}); err != nil {
logger.WithField("id", workloadID).Errorf(ctx, err, "failed to lock workload: %+v", err)
logger.WithField("id", workloadID).Error(ctx, err, "failed to lock workload")
msg.Error = err
}
ch <- msg
}
_ = c.pool.Invoke(func() { c.doRemapResourceAndLog(ctx, logger, node) })
return nil
}); err != nil {
logger.WithField("nodename", nodename).Errorf(ctx, err, "failed to lock node: %+v", err)
logger.WithField("nodename", nodename).Error(ctx, err, "failed to lock node")
}
}
})
6 changes: 3 additions & 3 deletions cluster/calcium/execute.go
@@ -27,7 +27,7 @@ func (c *Calcium) ExecuteWorkload(ctx context.Context, opts *types.ExecuteWorklo

workload, err := c.GetWorkload(ctx, opts.WorkloadID)
if err != nil {
logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to get workload: %+v", err)
logger.Error(ctx, err, "[ExecuteWorkload] Failed to get workload")
return
}

@@ -44,7 +44,7 @@

execID, stdout, stderr, inStream, err := workload.Engine.Execute(ctx, opts.WorkloadID, execConfig)
if err != nil {
logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to attach execID: %+v", err)
logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to attach execID %s", execID)
return
}

@@ -62,7 +62,7 @@ func (c *Calcium) ExecuteWorkload(ctx context.Context, opts *types.ExecuteWorklo

execCode, err := workload.Engine.ExecExitCode(ctx, opts.WorkloadID, execID)
if err != nil {
logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to get exitcode: %+v", err)
logger.Error(ctx, err, "[ExecuteWorkload] Failed to get exitcode")
return
}

4 changes: 2 additions & 2 deletions cluster/calcium/helper.go
@@ -11,7 +11,7 @@ import (
func distributionInspect(ctx context.Context, node *types.Node, image string, digests []string) bool {
remoteDigest, err := node.Engine.ImageRemoteDigest(ctx, image)
if err != nil {
log.Errorf(ctx, err, "[distributionInspect] get manifest failed %v", err)
log.Error(ctx, err, "[distributionInspect] get manifest failed")
return false
}

@@ -51,7 +51,7 @@ func pullImage(ctx context.Context, node *types.Node, image string) error {
rc, err := node.Engine.ImagePull(ctx, image, false)
defer utils.EnsureReaderClosed(ctx, rc)
if err != nil {
log.Errorf(ctx, err, "[pullImage] Error during pulling image %s: %v", image, err)
log.Errorf(ctx, err, "[pullImage] Error during pulling image %s", image)
return err
}
log.Infof(ctx, "[pullImage] Done pulling image %s", image)
2 changes: 1 addition & 1 deletion cluster/calcium/image.go
@@ -111,7 +111,7 @@ func (c *Calcium) RemoveImage(ctx context.Context, opts *types.ImageOptions) (ch
}
if opts.Prune {
if err := node.Engine.ImagesPrune(ctx); err != nil {
logger.Errorf(ctx, err, "[RemoveImage] Prune %s pod %s node failed: %+v", opts.Podname, node.Name, err)
logger.Errorf(ctx, err, "[RemoveImage] Prune %s pod %s node failed", opts.Podname, node.Name)
} else {
logger.Infof(ctx, "[RemoveImage] Prune %s pod %s node", opts.Podname, node.Name)
}
16 changes: 8 additions & 8 deletions cluster/calcium/lambda.go
@@ -37,7 +37,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC

createChan, err := c.CreateWorkload(ctx, opts)
if err != nil {
logger.Errorf(ctx, err, "[RunAndWait] Create workload error %+v", err)
logger.Error(ctx, err, "[RunAndWait] Create workload error")
return workloadIDs, nil, err
}

@@ -58,7 +58,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
// we don't need to remove this non-existing workload
// so just send the error message and return
if message.Error != nil || message.WorkloadID == "" {
logger.Errorf(ctx, message.Error, "[RunAndWait] Create workload failed %+v", message.Error)
logger.Error(ctx, message.Error, "[RunAndWait] Create workload failed")
return &types.AttachWorkloadMessage{
WorkloadID: "",
Data: []byte(fmt.Sprintf("Create workload failed %+v", message.Error)),
@@ -77,7 +77,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
}
defer func() {
if err := commit(); err != nil {
logger.Errorf(ctx, err, "[RunAndWait] Commit WAL %s failed: %s, %v", eventCreateLambda, message.WorkloadID, err)
logger.Errorf(ctx, err, "[RunAndWait] Commit WAL %s failed: %s", eventCreateLambda, message.WorkloadID)
}
}()

@@ -87,7 +87,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
ctx, cancel := context.WithCancel(utils.InheritTracingInfo(ctx, context.TODO()))
defer cancel()
if err := c.doRemoveWorkloadSync(ctx, []string{message.WorkloadID}); err != nil {
logger.Errorf(ctx, err, "[RunAndWait] Remove lambda workload failed %+v", err)
logger.Error(ctx, err, "[RunAndWait] Remove lambda workload failed")
} else {
logger.Infof(ctx, "[RunAndWait] Workload %s finished and removed", utils.ShortID(message.WorkloadID))
}
@@ -97,7 +97,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
// this is weird, we return the error directly and try to delete data
workload, err := c.GetWorkload(ctx, message.WorkloadID)
if err != nil {
logger.Errorf(ctx, err, "[RunAndWait] Get workload failed %+v", err)
logger.Error(ctx, err, "[RunAndWait] Get workload failed")
return &types.AttachWorkloadMessage{
WorkloadID: message.WorkloadID,
Data: []byte(fmt.Sprintf("Get workload %s failed %+v", message.WorkloadID, err)),
@@ -115,7 +115,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
Stdout: true,
Stderr: true,
}); err != nil {
logger.Errorf(ctx, err, "[RunAndWait] Can't fetch log of workload %s error %+v", message.WorkloadID, err)
logger.Errorf(ctx, err, "[RunAndWait] Can't fetch log of workload %s", message.WorkloadID)
return &types.AttachWorkloadMessage{
WorkloadID: message.WorkloadID,
Data: []byte(fmt.Sprintf("Fetch log for workload %s failed %+v", message.WorkloadID, err)),
@@ -130,7 +130,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
var inStream io.WriteCloser
stdout, stderr, inStream, err = workload.Engine.VirtualizationAttach(ctx, message.WorkloadID, true, true)
if err != nil {
logger.Errorf(ctx, err, "[RunAndWait] Can't attach workload %s error %+v", message.WorkloadID, err)
logger.Errorf(ctx, err, "[RunAndWait] Can't attach workload %s", message.WorkloadID)
return &types.AttachWorkloadMessage{
WorkloadID: message.WorkloadID,
Data: []byte(fmt.Sprintf("Attach to workload %s failed %+v", message.WorkloadID, err)),
@@ -156,7 +156,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
// wait and forward exitcode
r, err := workload.Engine.VirtualizationWait(ctx, message.WorkloadID, "")
if err != nil {
logger.Errorf(ctx, err, "[RunAndWait] %s wait failed %+v", utils.ShortID(message.WorkloadID), err)
logger.Errorf(ctx, err, "[RunAndWait] %s wait failed", utils.ShortID(message.WorkloadID))
return &types.AttachWorkloadMessage{
WorkloadID: message.WorkloadID,
Data: []byte(fmt.Sprintf("Wait workload %s failed %+v", message.WorkloadID, err)),
4 changes: 2 additions & 2 deletions cluster/calcium/lock.go
@@ -23,7 +23,7 @@ func (c *Calcium) doLock(ctx context.Context, name string, timeout time.Duration
defer cancel()
rollbackCtx = utils.InheritTracingInfo(rollbackCtx, ctx)
if e := lock.Unlock(rollbackCtx); e != nil {
log.Errorf(rollbackCtx, err, "failed to unlock %s: %+v", name, err)
log.Errorf(rollbackCtx, err, "failed to unlock %s", name)
}
}
}()
@@ -47,7 +47,7 @@ func (c *Calcium) doUnlockAll(ctx context.Context, locks map[string]lock.Distrib
}
for _, key := range order {
if err := c.doUnlock(ctx, locks[key], key); err != nil {
log.Errorf(ctx, err, "[doUnlockAll] Unlock %s failed %v", key, err)
log.Errorf(ctx, err, "[doUnlockAll] Unlock %s failed", key)
continue
}
}