diff --git a/.golangci.yaml b/.golangci.yaml
index 9dc1dc3a339..f8c8a9522f7 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -163,6 +163,7 @@ linters:
     - gocritic
    - nolintlint
     - stylecheck
+    - contextcheck

 run:
   timeout: 15m
diff --git a/agent/runner.go b/agent/runner.go
index df353f60f30..21413f9ef4f 100644
--- a/agent/runner.go
+++ b/agent/runner.go
@@ -133,12 +133,13 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 	state := rpc.State{}
 	state.Started = time.Now().Unix()

-	err = r.client.Init(ctxmeta, work.ID, state)
+	err = r.client.Init(runnerCtx, work.ID, state)
 	if err != nil {
 		logger.Error().Err(err).Msg("pipeline initialization failed")
 	}

 	var uploads sync.WaitGroup
+	//nolint:contextcheck
 	err = pipeline.New(work.Config,
 		pipeline.WithContext(workflowCtx),
 		pipeline.WithTaskUUID(fmt.Sprint(work.ID)),
@@ -188,7 +189,7 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 		Int("exit_code", state.ExitCode).
 		Msg("updating pipeline status")

-	if err := r.client.Done(ctxmeta, work.ID, state); err != nil {
+	if err := r.client.Done(runnerCtx, work.ID, state); err != nil {
 		logger.Error().Err(err).Msg("updating pipeline status failed")
 	} else {
 		logger.Debug().Msg("updating pipeline status complete")
diff --git a/cli/exec/exec.go b/cli/exec/exec.go
index 9568316916b..b78a1a0a553 100644
--- a/cli/exec/exec.go
+++ b/cli/exec/exec.go
@@ -213,7 +213,7 @@ func execWithAxis(c *cli.Context, file, repoPath string, axis matrix.Axis) error
 	}

 	backendCtx := context.WithValue(c.Context, backendTypes.CliContext, c)
-	backend.Init(backendCtx)
+	backend.Init()

 	backendEngine, err := backend.FindBackend(backendCtx, c.String("backend-engine"))
 	if err != nil {
diff --git a/cmd/agent/agent.go b/cmd/agent/agent.go
index 7c39773bc29..4ee9ca0074c 100644
--- a/cmd/agent/agent.go
+++ b/cmd/agent/agent.go
@@ -259,7 +259,7 @@ func getBackendEngine(backendCtx context.Context, backendName string, addons []s
 		return addonBackend.Value, nil
 	}

-	backend.Init(backendCtx)
+	backend.Init()
 	engine, err := backend.FindBackend(backendCtx, backendName)
 	if err != nil {
 		log.Error().Err(err).Msgf("cannot find backend engine '%s'", backendName)
diff --git a/pipeline/backend/backend.go b/pipeline/backend/backend.go
index 56d3291d75a..99ecb69134a 100644
--- a/pipeline/backend/backend.go
+++ b/pipeline/backend/backend.go
@@ -29,11 +29,11 @@ var (
 	backends       []types.Backend
 )

-func Init(ctx context.Context) {
+func Init() {
 	backends = []types.Backend{
 		docker.New(),
 		local.New(),
-		kubernetes.New(ctx),
+		kubernetes.New(),
 	}

 	backendsByName = make(map[string]types.Backend)
diff --git a/pipeline/backend/docker/docker.go b/pipeline/backend/docker/docker.go
index 0e9c5429484..24069106276 100644
--- a/pipeline/backend/docker/docker.go
+++ b/pipeline/backend/docker/docker.go
@@ -147,11 +147,11 @@ func (e *docker) Load(ctx context.Context) (*backend.BackendInfo, error) {
 	}, nil
 }

-func (e *docker) SetupWorkflow(_ context.Context, conf *backend.Config, taskUUID string) error {
+func (e *docker) SetupWorkflow(ctx context.Context, conf *backend.Config, taskUUID string) error {
 	log.Trace().Str("taskUUID", taskUUID).Msg("create workflow environment")

 	for _, vol := range conf.Volumes {
-		_, err := e.client.VolumeCreate(noContext, volume.CreateOptions{
+		_, err := e.client.VolumeCreate(ctx, volume.CreateOptions{
 			Name:   vol.Name,
 			Driver: volumeDriver,
 		})
@@ -165,7 +165,7 @@ func (e *docker) SetupWorkflow(ctx context.Context, conf *backend.Config, taskUUID
 		networkDriver = networkDriverNAT
 	}
 	for _, n := range conf.Networks {
-		_, err := e.client.NetworkCreate(noContext, n.Name, types.NetworkCreate{
+		_, err := e.client.NetworkCreate(ctx, n.Name, types.NetworkCreate{
 			Driver:     networkDriver,
 			EnableIPv6: e.enableIPv6,
 		})
@@ -311,27 +311,27 @@ func (e *docker) DestroyStep(ctx context.Context, step *backend.Step, taskUUID s
 	return nil
 }

-func (e *docker) DestroyWorkflow(_ context.Context, conf *backend.Config, taskUUID string) error {
+func (e *docker) DestroyWorkflow(ctx context.Context, conf *backend.Config, taskUUID string) error {
 	log.Trace().Str("taskUUID", taskUUID).Msgf("delete workflow environment")

 	for _, stage := range conf.Stages {
 		for _, step := range stage.Steps {
 			containerName := toContainerName(step)
-			if err := e.client.ContainerKill(noContext, containerName, "9"); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
+			if err := e.client.ContainerKill(ctx, containerName, "9"); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
 				log.Error().Err(err).Msgf("could not kill container '%s'", step.Name)
 			}
-			if err := e.client.ContainerRemove(noContext, containerName, removeOpts); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
+			if err := e.client.ContainerRemove(ctx, containerName, removeOpts); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
 				log.Error().Err(err).Msgf("could not remove container '%s'", step.Name)
 			}
 		}
 	}
 	for _, v := range conf.Volumes {
-		if err := e.client.VolumeRemove(noContext, v.Name, true); err != nil {
+		if err := e.client.VolumeRemove(ctx, v.Name, true); err != nil {
 			log.Error().Err(err).Msgf("could not remove volume '%s'", v.Name)
 		}
 	}
 	for _, n := range conf.Networks {
-		if err := e.client.NetworkRemove(noContext, n.Name); err != nil {
+		if err := e.client.NetworkRemove(ctx, n.Name); err != nil {
 			log.Error().Err(err).Msgf("could not remove network '%s'", n.Name)
 		}
 	}
@@ -339,8 +339,6 @@ func (e *docker) DestroyWorkflow(_ context.Context, conf *backend.Config, taskUU
 }

 var (
-	noContext = context.Background()
-
 	startOpts = types.ContainerStartOptions{}

 	removeOpts = types.ContainerRemoveOptions{
diff --git a/pipeline/backend/kubernetes/kubernetes.go b/pipeline/backend/kubernetes/kubernetes.go
index a42beb99ec1..1a3cd524fd7 100644
--- a/pipeline/backend/kubernetes/kubernetes.go
+++ b/pipeline/backend/kubernetes/kubernetes.go
@@ -47,7 +47,6 @@ const (
 var defaultDeleteOptions = newDefaultDeleteOptions()

 type kube struct {
-	ctx    context.Context
 	client kubernetes.Interface
 	config *config
 	goos   string
@@ -117,10 +116,8 @@ func configFromCliContext(ctx context.Context) (*config, error) {
 }

 // New returns a new Kubernetes Backend.
-func New(ctx context.Context) types.Backend {
-	return &kube{
-		ctx: ctx,
-	}
+func New() types.Backend {
+	return &kube{}
 }

 func (e *kube) Name() string {
@@ -132,8 +129,8 @@ func (e *kube) IsAvailable(context.Context) bool {
 	return len(host) > 0
 }

-func (e *kube) Load(context.Context) (*types.BackendInfo, error) {
-	config, err := configFromCliContext(e.ctx)
+func (e *kube) Load(ctx context.Context) (*types.BackendInfo, error) {
+	config, err := configFromCliContext(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -334,31 +331,31 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
 	return rc, nil
 }

-func (e *kube) DestroyStep(_ context.Context, step *types.Step, taskUUID string) error {
+func (e *kube) DestroyStep(ctx context.Context, step *types.Step, taskUUID string) error {
 	if step.Type == types.StepTypeService {
 		// a service should be stopped by DestroyWorkflow so we can ignore it
-		log.Trace().Msgf("DestroyStep got service '%s', ignoring it.", step.Name)
+		log.Trace().Msgf("destroyStep got service '%s', ignoring it.", step.Name)
 		return nil
 	}
-	log.Trace().Str("taskUUID", taskUUID).Msgf("stopping step: %s", step.Name)
-	err := stopPod(e.ctx, e, step, defaultDeleteOptions)
+	log.Trace().Str("taskUUID", taskUUID).Msgf("Stopping step: %s", step.Name)
+	err := stopPod(ctx, e, step, defaultDeleteOptions)
 	return err
 }

 // Destroy the pipeline environment.
-func (e *kube) DestroyWorkflow(_ context.Context, conf *types.Config, taskUUID string) error {
+func (e *kube) DestroyWorkflow(ctx context.Context, conf *types.Config, taskUUID string) error {
 	log.Trace().Str("taskUUID", taskUUID).Msg("deleting Kubernetes primitives")

 	// Use noContext because the ctx sent to this function will be canceled/done in case of error or canceled by user.
 	for _, stage := range conf.Stages {
 		for _, step := range stage.Steps {
-			err := stopPod(e.ctx, e, step, defaultDeleteOptions)
+			err := stopPod(ctx, e, step, defaultDeleteOptions)
 			if err != nil {
 				return err
 			}

 			if step.Type == types.StepTypeService {
-				err := stopService(e.ctx, e, step, defaultDeleteOptions)
+				err := stopService(ctx, e, step, defaultDeleteOptions)
 				if err != nil {
 					return err
 				}
@@ -367,7 +364,7 @@ func (e *kube) DestroyWorkflow(_ context.Context, conf *types.Config, taskUUID s
 	}

 	for _, vol := range conf.Volumes {
-		err := stopVolume(e.ctx, e, vol.Name, defaultDeleteOptions)
+		err := stopVolume(ctx, e, vol.Name, defaultDeleteOptions)
 		if err != nil {
 			return err
 		}
diff --git a/pipeline/pipeline.go b/pipeline/pipeline.go
index 9130fff78f7..8be2338aabc 100644
--- a/pipeline/pipeline.go
+++ b/pipeline/pipeline.go
@@ -112,7 +112,7 @@ func (r *Runtime) Run(runnerCtx context.Context) error {
 	}()

 	r.started = time.Now().Unix()
-	if err := r.engine.SetupWorkflow(r.ctx, r.spec, r.taskUUID); err != nil {
+	if err := r.engine.SetupWorkflow(runnerCtx, r.spec, r.taskUUID); err != nil {
 		return err
 	}

diff --git a/server/pipeline/queue.go b/server/pipeline/queue.go
index 9b845ce91cb..c8b1fc77ee5 100644
--- a/server/pipeline/queue.go
+++ b/server/pipeline/queue.go
@@ -25,7 +25,7 @@ import (
 	"go.woodpecker-ci.org/woodpecker/v2/server/pipeline/stepbuilder"
 )

-func queuePipeline(repo *model.Repo, pipelineItems []*stepbuilder.Item) error {
+func queuePipeline(ctx context.Context, repo *model.Repo, pipelineItems []*stepbuilder.Item) error {
 	var tasks []*model.Task
 	for _, item := range pipelineItems {
 		if item.Workflow.State == model.StatusSkipped {
@@ -54,7 +54,7 @@ func queuePipeline(repo *model.Repo, pipelineItems []*stepbuilder.Item) error {
 		tasks = append(tasks, task)
 	}

-	return server.Config.Services.Queue.PushAtOnce(context.Background(), tasks)
+	return server.Config.Services.Queue.PushAtOnce(ctx, tasks)
 }

 func taskIds(dependsOn []string, pipelineItems []*stepbuilder.Item) (taskIds []string) {
diff --git a/server/pipeline/start.go b/server/pipeline/start.go
index 16ec1ea1a49..cb9d77939ba 100644
--- a/server/pipeline/start.go
+++ b/server/pipeline/start.go
@@ -39,7 +39,7 @@ func start(ctx context.Context, store store.Store, activePipeline *model.Pipelin

 	publishPipeline(ctx, activePipeline, repo, user)

-	if err := queuePipeline(repo, pipelineItems); err != nil {
+	if err := queuePipeline(ctx, repo, pipelineItems); err != nil {
 		log.Error().Err(err).Msg("queuePipeline")
 		return nil, err
 	}