From 0b39fd6f2560511d93243666c7587c6c3e04a851 Mon Sep 17 00:00:00 2001
From: Lyndon-Li
Date: Tue, 18 Oct 2022 22:26:22 +0800
Subject: [PATCH] fix lint loop iterator problem

Take the address of the indexed slice element (&items[i]) or of an
explicit copy instead of the range loop variable, which is reused on
every iteration, so the //nolint markers that suppressed the loop
iterator findings can be removed. Also replace the strTrue constants
with the plain "true" literal and raise goconst min-occurrences from
3 to 5.

Signed-off-by: Lyndon-Li
---
 golangci.yaml                                 |  2 +-
 pkg/cmd/cli/backup/create.go                  | 10 +++-------
 pkg/cmd/cli/backup/describe.go                |  4 ++--
 pkg/cmd/cli/backuplocation/create.go          |  4 ++--
 pkg/cmd/cli/backuplocation/delete.go          | 12 ++++++------
 pkg/cmd/cli/backuplocation/set.go             |  4 ++--
 pkg/cmd/cli/nodeagent/server.go               |  8 ++++----
 pkg/cmd/cli/restore/create.go                 | 12 ++++--------
 pkg/cmd/cli/restore/describe.go               |  4 ++--
 pkg/cmd/cli/schedule/describe.go              |  4 ++--
 pkg/cmd/server/server.go                      |  8 ++++----
 pkg/controller/backup_deletion_controller.go  | 12 ++++++------
 pkg/controller/backup_sync_controller.go      | 14 ++++++++------
 pkg/nodeagent/node_agent.go                   |  6 +++---
 pkg/restore/admissionwebhook_config_action.go |  6 +++---
 pkg/util/kube/utils.go                        |  6 +++---
 16 files changed, 55 insertions(+), 61 deletions(-)

diff --git a/golangci.yaml b/golangci.yaml
index 8c6d87b63e8..e958170d028 100644
--- a/golangci.yaml
+++ b/golangci.yaml
@@ -117,7 +117,7 @@ linters-settings:
     # minimal length of string constant, 3 by default
     min-len: 3
     # minimal occurrences count to trigger, 3 by default
-    min-occurrences: 3
+    min-occurrences: 5
   gocritic:
     # Which checks should be enabled; can't be combined with 'disabled-checks';
     # See https://go-critic.github.io/overview#checks-overview
diff --git a/pkg/cmd/cli/backup/create.go b/pkg/cmd/cli/backup/create.go
index 27c0be37298..52aeab3d527 100644
--- a/pkg/cmd/cli/backup/create.go
+++ b/pkg/cmd/cli/backup/create.go
@@ -112,10 +112,6 @@ func NewCreateOptions() *CreateOptions {
 	}
 }
 
-const (
-	strTrue = "true"
-)
-
 func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
 	flags.DurationVar(&o.TTL, "ttl", o.TTL, "How long before the backup can be garbage collected.")
 	flags.Var(&o.IncludeNamespaces, "include-namespaces", "Namespaces to include in the backup (use '*' for all namespaces).")
@@ -131,13 +127,13 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
 	f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup. If the parameter is not set, it is treated as setting to 'true'.")
 	// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
 	// like a normal bool flag
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 
 	f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the backup")
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 
 	f = flags.VarPF(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", "", "Use pod volume file system backup by default for volumes")
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 }
 
 // BindWait binds the wait flag separately so it is not called by other create
diff --git a/pkg/cmd/cli/backup/describe.go b/pkg/cmd/cli/backup/describe.go
index 3e5ebbded33..1284e29c93d 100644
--- a/pkg/cmd/cli/backup/describe.go
+++ b/pkg/cmd/cli/backup/describe.go
@@ -73,7 +73,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
 			}
 
 			first := true
-			for _, backup := range backups.Items {
+			for i, backup := range backups.Items {
 				deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID))
 				deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(context.TODO(), deleteRequestListOptions)
 				if err != nil {
@@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
 					}
 				}
 
-				s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint
+				s := output.DescribeBackup(context.Background(), kbClient, &backups.Items[i], deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
 				if first {
 					first = false
 					fmt.Print(s)
diff --git a/pkg/cmd/cli/backuplocation/create.go b/pkg/cmd/cli/backuplocation/create.go
index ec19b6e56d0..e08e0c6ae29 100644
--- a/pkg/cmd/cli/backuplocation/create.go
+++ b/pkg/cmd/cli/backuplocation/create.go
@@ -209,10 +209,10 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 		if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
 			return errors.WithStack(err)
 		}
-		for _, location := range locations.Items {
+		for i, location := range locations.Items {
 			if location.Spec.Default {
 				location.Spec.Default = false
-				if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint
+				if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
 					return errors.WithStack(err)
 				}
 				break
diff --git a/pkg/cmd/cli/backuplocation/delete.go b/pkg/cmd/cli/backuplocation/delete.go
index d67ed543697..becb92afb3b 100644
--- a/pkg/cmd/cli/backuplocation/delete.go
+++ b/pkg/cmd/cli/backuplocation/delete.go
@@ -115,8 +115,8 @@ func Run(f client.Factory, o *cli.DeleteOptions) error {
 	}
 
 	// Create a backup-location deletion request for each
-	for _, location := range locations.Items {
-		if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil { //nolint
+	for i, location := range locations.Items {
+		if err := kbClient.Delete(context.Background(), &locations.Items[i], &kbclient.DeleteOptions{}); err != nil {
 			errs = append(errs, errors.WithStack(err))
 			continue
 		}
@@ -162,8 +162,8 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele
 
 func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error {
 	var errs []error
-	for _, backup := range backups.Items {
-		if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil { //nolint
+	for i, backup := range backups.Items {
+		if err := client.Delete(context.Background(), &backups.Items[i], &kbclient.DeleteOptions{}); err != nil {
 			errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err)))
 			continue
 		}
@@ -174,8 +174,8 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err
 
 func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
 	var errs []error
-	for _, repo := range repos.Items {
-		if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { //nolint
+	for i, repo := range repos.Items {
+		if err := client.Delete(context.Background(), &repos.Items[i], &kbclient.DeleteOptions{}); err != nil {
 			errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err)))
 			continue
 		}
diff --git a/pkg/cmd/cli/backuplocation/set.go b/pkg/cmd/cli/backuplocation/set.go
index 44730d659d3..bdb36b867c2 100644
--- a/pkg/cmd/cli/backuplocation/set.go
+++ b/pkg/cmd/cli/backuplocation/set.go
@@ -120,7 +120,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
 	if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
 		return errors.WithStack(err)
 	}
-	for _, location := range locations.Items {
+	for i, location := range locations.Items {
 		if !location.Spec.Default {
 			continue
 		}
@@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
 			break
 		}
 		location.Spec.Default = false
-		if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint
+		if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
 			return errors.WithStack(err)
 		}
 		break
diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go
index 7fb3dfc8275..b154f9bf81c 100644
--- a/pkg/cmd/cli/nodeagent/server.go
+++ b/pkg/cmd/cli/nodeagent/server.go
@@ -294,7 +294,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
 		s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")
 		return
 	}
-	for _, pvb := range pvbs.Items {
+	for i, pvb := range pvbs.Items {
 		if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress {
 			s.logger.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase)
 			continue
@@ -307,7 +307,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
 		pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
 		pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase)
 		pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-		if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { //nolint
+		if err := client.Patch(s.ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
 			s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
 			continue
 		}
@@ -321,7 +321,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
 		s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
 		return
 	}
-	for _, pvr := range pvrs.Items {
+	for i, pvr := range pvrs.Items {
 		if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
 			s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
 			continue
@@ -345,7 +345,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
 		pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
 		pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase)
 		pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-		if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil { //nolint
+		if err := client.Patch(s.ctx, &pvrs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
 			s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
 			continue
 		}
diff --git a/pkg/cmd/cli/restore/create.go b/pkg/cmd/cli/restore/create.go
index 2ac45e8ee85..7625c1f7628 100644
--- a/pkg/cmd/cli/restore/create.go
+++ b/pkg/cmd/cli/restore/create.go
@@ -107,10 +107,6 @@ func NewCreateOptions() *CreateOptions {
 	}
 }
 
-const (
-	strTrue = "true"
-)
-
 func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
 	flags.StringVar(&o.BackupName, "from-backup", "", "Backup to restore from")
 	flags.StringVar(&o.ScheduleName, "from-schedule", "", "Schedule to restore from")
@@ -127,18 +123,18 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
 	f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.")
 	// this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true"
 	// like a normal bool flag
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 
 	f = flags.VarPF(&o.PreserveNodePorts, "preserve-nodeports", "", "Whether to preserve nodeports of Services when restoring.")
 	// this allows the user to just specify "--preserve-nodeports" as shorthand for "--preserve-nodeports=true"
 	// like a normal bool flag
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 
 	f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the restore.")
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 
 	f = flags.VarPF(&o.AllowPartiallyFailed, "allow-partially-failed", "", "If using --from-schedule, whether to consider PartiallyFailed backups when looking for the most recent one. This flag has no effect if not using --from-schedule.")
-	f.NoOptDefVal = strTrue
+	f.NoOptDefVal = "true"
 
 	flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "Wait for the operation to complete.")
 }
diff --git a/pkg/cmd/cli/restore/describe.go b/pkg/cmd/cli/restore/describe.go
index e8cbb8d885a..88514e8651b 100644
--- a/pkg/cmd/cli/restore/describe.go
+++ b/pkg/cmd/cli/restore/describe.go
@@ -68,14 +68,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
 			}
 
 			first := true
-			for _, restore := range restores.Items {
+			for i, restore := range restores.Items {
 				opts := newPodVolumeRestoreListOptions(restore.Name)
 				podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts)
 				if err != nil {
 					fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
 				}
 
-				s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint
+				s := output.DescribeRestore(context.Background(), kbClient, &restores.Items[i], podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
 				if first {
 					first = false
 					fmt.Print(s)
diff --git a/pkg/cmd/cli/schedule/describe.go b/pkg/cmd/cli/schedule/describe.go
index 5c64610436b..b43cad45fc3 100644
--- a/pkg/cmd/cli/schedule/describe.go
+++ b/pkg/cmd/cli/schedule/describe.go
@@ -53,8 +53,8 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
 			}
 
 			first := true
-			for _, schedule := range schedules.Items {
-				s := output.DescribeSchedule(&schedule) //nolint
+			for i := range schedules.Items {
+				s := output.DescribeSchedule(&schedules.Items[i])
 				if first {
 					first = false
 					fmt.Print(s)
diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go
index e66a6275f1e..cc1bb8367a6 100644
--- a/pkg/cmd/server/server.go
+++ b/pkg/cmd/server/server.go
@@ -939,7 +939,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
 		return
 	}
-	for _, backup := range backups.Items {
+	for i, backup := range backups.Items {
 		if backup.Status.Phase != velerov1api.BackupPhaseInProgress {
 			log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase)
 			continue
 		}
@@ -948,7 +948,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
 		updated.Status.Phase = velerov1api.BackupPhaseFailed
 		updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase)
 		updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-		if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil { //nolint
+		if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil {
 			log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName())
 			continue
 		}
@@ -962,7 +962,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
 		log.WithError(errors.WithStack(err)).Error("failed to list restores")
 		return
 	}
-	for _, restore := range restores.Items {
+	for i, restore := range restores.Items {
 		if restore.Status.Phase != velerov1api.RestorePhaseInProgress {
 			log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase)
 			continue
 		}
@@ -971,7 +971,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
 		updated.Status.Phase = velerov1api.RestorePhaseFailed
 		updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase)
 		updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-		if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil { //nolint
+		if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil {
 			log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName())
 			continue
 		}
diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go
index 7b188d98b32..76c39c8c842 100644
--- a/pkg/controller/backup_deletion_controller.go
+++ b/pkg/controller/backup_deletion_controller.go
@@ -325,11 +325,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}); err != nil {
 		log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
 	} else {
-		for _, restore := range restoreList.Items {
+		for i, restore := range restoreList.Items {
 			if restore.Spec.BackupName != backup.Name {
 				continue
 			}
-			restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore)) //nolint
+			restoreLog := log.WithField("restore", kube.NamespaceAndName(&restoreList.Items[i]))
 
 			restoreLog.Info("Deleting restore log/results from backup storage")
 			if err := backupStore.DeleteRestore(restore.Name); err != nil {
@@ -339,8 +339,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 			}
 
 			restoreLog.Info("Deleting restore referencing backup")
-			if err := r.Delete(ctx, &restore); err != nil { //nolint
-				errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error()) //nolint
+			if err := r.Delete(ctx, &restoreList.Items[i]); err != nil {
+				errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restoreList.Items[i])).Error())
 			}
 		}
 	}
@@ -423,11 +423,11 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
 		return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")}
 	}
 	var errs []error
-	for _, dbr := range dbrList.Items {
+	for i, dbr := range dbrList.Items {
 		if dbr.Name == req.Name {
 			continue
 		}
-		if err := r.Delete(ctx, &dbr); err != nil { //nolint
+		if err := r.Delete(ctx, &dbrList.Items[i]); err != nil {
 			errs = append(errs, errors.WithStack(err))
 		} else {
 			log.Infof("deletion request '%s' removed.", dbr.Name)
diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go
index 30ebe3b094b..d29a586416a 100644
--- a/pkg/controller/backup_sync_controller.go
+++ b/pkg/controller/backup_sync_controller.go
@@ -317,13 +317,13 @@ func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locati
 		return
 	}
 
-	for _, backup := range backupList.Items {
+	for i, backup := range backupList.Items {
 		log = log.WithField("backup", backup.Name)
 		if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) {
 			continue
 		}
 
-		if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { //nolint
+		if err := b.client.Delete(ctx, &backupList.Items[i], &client.DeleteOptions{}); err != nil {
 			log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
 		} else {
 			log.Debug("Deleted orphaned backup from cluster")
@@ -344,10 +344,10 @@ func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, b
 	if err := b.client.List(ctx, &vsList, listOptions); err != nil {
 		log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName)
 	} else {
-		for _, vs := range vsList.Items {
+		for i, vs := range vsList.Items {
 			name := kube.NamespaceAndName(vs.GetObjectMeta())
 			log.Debugf("Deleting volumesnapshot %s", name)
-			if err := b.client.Delete(context.TODO(), &vs); err != nil { //nolint
+			if err := b.client.Delete(context.TODO(), &vsList.Items[i]); err != nil {
 				log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name)
 			}
 		}
@@ -379,11 +379,13 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList {
 			bslArray = append(bslArray, &inputBSLList.Items[i])
 			// append everything before the default
 			for _, bsl := range inputBSLList.Items[:i] {
-				bslArray = append(bslArray, &bsl) //nolint
+				cpBsl := bsl
+				bslArray = append(bslArray, &cpBsl)
 			}
 			// append everything after the default
 			for _, bsl := range inputBSLList.Items[i+1:] {
-				bslArray = append(bslArray, &bsl) //nolint
+				cpBsl := bsl
+				bslArray = append(bslArray, &cpBsl)
 			}
 
 			meta.SetList(resultBSLList, bslArray)
diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go
index 091c20b51be..cae3e88e57f 100644
--- a/pkg/nodeagent/node_agent.go
+++ b/pkg/nodeagent/node_agent.go
@@ -61,12 +61,12 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, pod
 		return errors.Wrap(err, "failed to list daemonset pods")
 	}
 
-	for _, pod := range pods.Items {
-		if kube.IsPodRunning(&pod) != nil { //nolint
+	for i := range pods.Items {
+		if kube.IsPodRunning(&pods.Items[i]) != nil {
 			continue
 		}
 
-		if pod.Spec.NodeName == nodeName {
+		if pods.Items[i].Spec.NodeName == nodeName {
 			return nil
 		}
 	}
diff --git a/pkg/restore/admissionwebhook_config_action.go b/pkg/restore/admissionwebhook_config_action.go
index 7a617b6f828..74fd1b3ff9b 100644
--- a/pkg/restore/admissionwebhook_config_action.go
+++ b/pkg/restore/admissionwebhook_config_action.go
@@ -70,15 +70,15 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA
 		return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
 	}
 	newWebhooks := make([]interface{}, 0)
-	for i, entry := range webhooks {
+	for i := range webhooks {
 		logger2 := logger.WithField("index", i)
-		obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry) //nolint
+		obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhooks[i])
 		if err != nil {
 			logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err)
 			continue
 		}
 		s, _, _ := unstructured.NestedString(obj, "sideEffects")
-		if s != "None" && s != "NoneOnDryRun" { //nolint
+		if s != "None" && s != "NoneOnDryRun" {
 			logger2.Infof("reset the invalid sideEffects value '%s' to 'None'", s)
 			obj["sideEffects"] = "None"
 		}
diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go
index 42f0a35419b..73603b059f2 100644
--- a/pkg/util/kube/utils.go
+++ b/pkg/util/kube/utils.go
@@ -123,9 +123,9 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
 func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) {
 	var volume *corev1api.Volume
 
-	for _, item := range pod.Spec.Volumes {
-		if item.Name == volumeName {
-			volume = &item //nolint
+	for i := range pod.Spec.Volumes {
+		if pod.Spec.Volumes[i].Name == volumeName {
+			volume = &pod.Spec.Volumes[i]
 			break
 		}
 	}