Skip to content

Commit

Permalink
pass lint
Browse files Browse the repository at this point in the history
  • Loading branch information
jschwinger233 committed Oct 13, 2020
1 parent 3bbb7c1 commit 8007dee
Show file tree
Hide file tree
Showing 5 changed files with 14 additions and 28 deletions.
11 changes: 7 additions & 4 deletions cluster/calcium/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio

for nodename := range deployMap {
if e := c.store.DeleteProcessing(ctx, opts, nodename); e != nil {
err = e
log.Errorf("[Calcium.doCreateWorkloads] delete processing failed for %s: %+v", nodename, err)
}
}
Expand Down Expand Up @@ -104,7 +105,8 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio

// rollback: give back resources
func(ctx context.Context) (err error) {
for nodeName, indices := range rollbackMap {
for nodeName, rollbackIndices := range rollbackMap {
indices := rollbackIndices
if e := c.withNodeLocked(ctx, nodeName, func(node *types.Node) error {
for _, plan := range planMap {
plan.RollbackChangesOnNode(node, indices...)
Expand All @@ -123,7 +125,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
}
}()

return ch, nil
return ch, err
}

func (c *Calcium) doDeployWorkloads(ctx context.Context, ch chan *types.CreateContainerMessage, opts *types.DeployOptions, planMap map[types.ResourceType]types.ResourcePlans, deployMap map[string]*types.DeployInfo) (_ map[string][]int, err error) {
Expand Down Expand Up @@ -166,7 +168,8 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr
Publish: map[string][]string{},
}

func() (e error) {
func() {
var e error
defer func() {
if e != nil {
err = e
Expand All @@ -187,7 +190,7 @@ func (c *Calcium) doDeployWorkloadsOnNode(ctx context.Context, ch chan *types.Cr
}
}

return c.doDeployOneWorkload(ctx, node, opts, createMsg, seq+idx, deployInfo.Deploy-1-idx)
e = c.doDeployOneWorkload(ctx, node, opts, createMsg, seq+idx, deployInfo.Deploy-1-idx)
}()
}

Expand Down
16 changes: 6 additions & 10 deletions cluster/calcium/realloc.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,6 @@ import (
// nodeContainers maps a node name to the list of containers on that node.
type nodeContainers map[string][]*types.Container

// volCPUMemNodeContainers indexes nodeContainers by volume spec (string),
// then CPU request (float64), then memory request (int64) — i.e. it groups
// node/container lists by their volume:cpu:memory resource combination.
type volCPUMemNodeContainers map[string]map[float64]map[int64]nodeContainers

// ReallocResource allow realloc container resource
func (c *Calcium) ReallocResource(ctx context.Context, opts *types.ReallocOptions) (chan *types.ReallocResourceMessage, error) {
ch := make(chan *types.ReallocResourceMessage)
Expand Down Expand Up @@ -75,7 +72,7 @@ func (c *Calcium) ReallocResource(ctx context.Context, opts *types.ReallocOption
}

// group containers by node and requests
func (c *Calcium) doReallocContainersOnPod(ctx context.Context, ch chan *types.ReallocResourceMessage, nodeContainersInfo nodeContainers, opts *types.ReallocOptions) (err error) {
func (c *Calcium) doReallocContainersOnPod(ctx context.Context, ch chan *types.ReallocResourceMessage, nodeContainersInfo nodeContainers, opts *types.ReallocOptions) {
hardVbsMap := map[string]types.VolumeBindings{}
containerGroups := map[string]map[[3]types.ResourceRequest][]*types.Container{}
for nodename, containers := range nodeContainersInfo {
Expand All @@ -84,7 +81,7 @@ func (c *Calcium) doReallocContainersOnPod(ctx context.Context, ch chan *types.R
vbs, err := types.MergeVolumeBindings(container.Volumes, opts.Volumes)
if err != nil {
ch <- &types.ReallocResourceMessage{Error: err}
return errors.WithStack(err)
return
}
var autoVbs types.VolumeBindings
autoVbs, hardVbsMap[container.Name] = vbs.Divide()
Expand All @@ -105,7 +102,7 @@ func (c *Calcium) doReallocContainersOnPod(ctx context.Context, ch chan *types.R
for _, req := range reqs {
if err = req.DeployValidate(); err != nil {
ch <- &types.ReallocResourceMessage{Error: err}
return errors.WithStack(err)
return
}
}
containerGroups[nodename][reqs] = append(containerGroups[nodename][reqs], container)
Expand All @@ -114,17 +111,16 @@ func (c *Calcium) doReallocContainersOnPod(ctx context.Context, ch chan *types.R

for nodename, containerByReq := range containerGroups {
for reqs, containers := range containerByReq {
if err := c.doReallocContainersOnNode(ctx, ch, nodename, containers, []types.ResourceRequest{reqs[0], reqs[1], reqs[2]}, opts, hardVbsMap); err != nil {
if err := c.doReallocContainersOnNode(ctx, ch, nodename, containers, []types.ResourceRequest{reqs[0], reqs[1], reqs[2]}, hardVbsMap); err != nil {

ch <- &types.ReallocResourceMessage{Error: err}
}
}
}
return nil
}

// transaction: node meta
func (c *Calcium) doReallocContainersOnNode(ctx context.Context, ch chan *types.ReallocResourceMessage, nodename string, containers []*types.Container, newReqs []types.ResourceRequest, opts *types.ReallocOptions, hardVbsMap map[string]types.VolumeBindings) (err error) {
func (c *Calcium) doReallocContainersOnNode(ctx context.Context, ch chan *types.ReallocResourceMessage, nodename string, containers []*types.Container, newReqs []types.ResourceRequest, hardVbsMap map[string]types.VolumeBindings) (err error) {
{
return c.withNodeLocked(ctx, nodename, func(node *types.Node) error {
planMap, total, _, err := scheduler.SelectNodes(newReqs, map[string]*types.Node{node.Name: node})
Expand Down Expand Up @@ -216,7 +212,7 @@ func (c *Calcium) doUpdateResourceOnInstances(ctx context.Context, ch chan *type
}

wg.Wait()
return
return rollbacks, err
}

// transaction: container meta
Expand Down
9 changes: 0 additions & 9 deletions cluster/calcium/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -180,12 +180,3 @@ func (c *Calcium) doAllocResource(ctx context.Context, nodeMap map[string]*types

return planMap, deployMap, nil
}

// doBindProcessStatus records the pending deploy count of each node in
// nodesInfo to the store via SaveProcessing, aborting on the first error.
func (c *Calcium) doBindProcessStatus(ctx context.Context, opts *types.DeployOptions, nodesInfo []types.NodeInfo) error {
	for i := range nodesInfo {
		info := nodesInfo[i]
		if err := c.store.SaveProcessing(ctx, opts, info.Name, info.Deploy); err != nil {
			return err
		}
	}
	return nil
}
4 changes: 0 additions & 4 deletions scheduler/complex/potassium.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,6 @@ import (
log "github.com/sirupsen/logrus"
)

var (
	// potassium is a package-level *Potassium scheduler instance;
	// NOTE(review): its initializer/usage is not visible here — presumably a
	// shared singleton for the complex scheduler. Confirm before relying on it.
	potassium *Potassium
)

// Potassium is a scheduler
type Potassium struct {
maxshare, sharebase int
Expand Down
2 changes: 1 addition & 1 deletion store/etcdv3/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func (m *Mercury) MakeDeployStatus(ctx context.Context, opts *types.DeployOption
return m.doLoadProcessing(ctx, opts, strategyInfos)
}

func (m *Mercury) doGetDeployStatus(_ context.Context, resp *clientv3.GetResponse, strategyInfos []types.StrategyInfo) error { // nolint
func (m *Mercury) doGetDeployStatus(_ context.Context, resp *clientv3.GetResponse, strategyInfos []types.StrategyInfo) error {
nodesCount := map[string]int{}
for _, ev := range resp.Kvs {
key := string(ev.Key)
Expand Down

0 comments on commit 8007dee

Please sign in to comment.