fix lint loop iterator problem
Signed-off-by: Lyndon-Li <[email protected]>
Lyndon-Li committed Oct 18, 2022
1 parent d7b4583 commit 3e772fd
Showing 15 changed files with 52 additions and 54 deletions.
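
Editorial context (not part of the commit message): most of the hunks below replace "&loopVariable" with "&list.Items[i]" to resolve the lint warning about implicit memory aliasing of a range loop variable, instead of suppressing it with //nolint. Before Go 1.22, the loop variable is a single variable reused on every iteration, so any retained pointer to it ends up referring to the same memory. The golangci.yaml change (raising goconst's min-occurrences from 3 to 5) appears to be related housekeeping that lets restore/create.go drop its strTrue constant and use the literal "true" directly. A minimal, self-contained sketch of the aliasing problem and of the index-based fix used here (the widget type and names are illustrative, not Velero code):

// Sketch only: the loop-iterator aliasing problem this commit fixes.
package main

import "fmt"

type widget struct{ name string }

func main() {
    widgets := []widget{{"a"}, {"b"}, {"c"}}

    // Problematic (pre-Go 1.22 semantics): w is one variable reused on every
    // iteration, so all stored pointers alias it and typically end up
    // pointing at the last element.
    var bad []*widget
    for _, w := range widgets {
        bad = append(bad, &w) //nolint // the pattern the linters flag
    }
    fmt.Println(bad[0].name, bad[1].name, bad[2].name) // typically: c c c

    // The fix applied in most files below: take the address of the slice
    // element itself, which is distinct for every index.
    var good []*widget
    for i := range widgets {
        good = append(good, &widgets[i])
    }
    fmt.Println(good[0].name, good[1].name, good[2].name) // a b c
}
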
2 changes: 1 addition & 1 deletion golangci.yaml
@@ -117,7 +117,7 @@ linters-settings:
# minimal length of string constant, 3 by default
min-len: 3
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
min-occurrences: 5
gocritic:
# Which checks should be enabled; can't be combined with 'disabled-checks';
# See https://go-critic.github.io/overview#checks-overview
4 changes: 2 additions & 2 deletions pkg/cmd/cli/backup/describe.go
@@ -73,7 +73,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}

first := true
for _, backup := range backups.Items {
for i, backup := range backups.Items {
deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID))
deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(context.TODO(), deleteRequestListOptions)
if err != nil {
@@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
}

s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint
s := output.DescribeBackup(context.Background(), kbClient, &backups.Items[i], deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
if first {
first = false
fmt.Print(s)
4 changes: 2 additions & 2 deletions pkg/cmd/cli/backuplocation/create.go
@@ -209,10 +209,10 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
return errors.WithStack(err)
}
for _, location := range locations.Items {
for i, location := range locations.Items {
if location.Spec.Default {
location.Spec.Default = false
if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint
if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
return errors.WithStack(err)
}
break
12 changes: 6 additions & 6 deletions pkg/cmd/cli/backuplocation/delete.go
@@ -115,8 +115,8 @@ func Run(f client.Factory, o *cli.DeleteOptions) error {
}

// Create a backup-location deletion request for each
for _, location := range locations.Items {
if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil { //nolint
for i, location := range locations.Items {
if err := kbClient.Delete(context.Background(), &locations.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(err))
continue
}
@@ -162,8 +162,8 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele

func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error {
var errs []error
for _, backup := range backups.Items {
if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil { //nolint
for i, backup := range backups.Items {
if err := client.Delete(context.Background(), &backups.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err)))
continue
}
@@ -174,8 +174,8 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err

func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
var errs []error
for _, repo := range repos.Items {
if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { //nolint
for i, repo := range repos.Items {
if err := client.Delete(context.Background(), &repos.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err)))
continue
}
4 changes: 2 additions & 2 deletions pkg/cmd/cli/backuplocation/set.go
@@ -120,7 +120,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
return errors.WithStack(err)
}
for _, location := range locations.Items {
for i, location := range locations.Items {
if !location.Spec.Default {
continue
}
@@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
break
}
location.Spec.Default = false
if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint
if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
return errors.WithStack(err)
}
break
8 changes: 4 additions & 4 deletions pkg/cmd/cli/nodeagent/server.go
@@ -294,7 +294,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")
return
}
for _, pvb := range pvbs.Items {
for i, pvb := range pvbs.Items {
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress {
s.logger.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase)
continue
@@ -307,7 +307,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase)
pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { //nolint
if err := client.Patch(s.ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
continue
}
@@ -321,7 +321,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
return
}
for _, pvr := range pvrs.Items {
for i, pvr := range pvrs.Items {
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
continue
@@ -345,7 +345,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase)
pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil { //nolint
if err := client.Patch(s.ctx, &pvrs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
continue
}
12 changes: 4 additions & 8 deletions pkg/cmd/cli/restore/create.go
@@ -107,10 +107,6 @@ func NewCreateOptions() *CreateOptions {
}
}

const (
strTrue = "true"
)

func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.StringVar(&o.BackupName, "from-backup", "", "Backup to restore from")
flags.StringVar(&o.ScheduleName, "from-schedule", "", "Schedule to restore from")
@@ -127,18 +123,18 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.")
// this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true"
// like a normal bool flag
f.NoOptDefVal = strTrue
f.NoOptDefVal = "true"

f = flags.VarPF(&o.PreserveNodePorts, "preserve-nodeports", "", "Whether to preserve nodeports of Services when restoring.")
// this allows the user to just specify "--preserve-nodeports" as shorthand for "--preserve-nodeports=true"
// like a normal bool flag
f.NoOptDefVal = strTrue
f.NoOptDefVal = "true"

f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the restore.")
f.NoOptDefVal = strTrue
f.NoOptDefVal = "true"

f = flags.VarPF(&o.AllowPartiallyFailed, "allow-partially-failed", "", "If using --from-schedule, whether to consider PartiallyFailed backups when looking for the most recent one. This flag has no effect if not using --from-schedule.")
f.NoOptDefVal = strTrue
f.NoOptDefVal = "true"

flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "Wait for the operation to complete.")
}
4 changes: 2 additions & 2 deletions pkg/cmd/cli/restore/describe.go
@@ -68,14 +68,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}

first := true
for _, restore := range restores.Items {
for i, restore := range restores.Items {
opts := newPodVolumeRestoreListOptions(restore.Name)
podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
}

s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint
s := output.DescribeRestore(context.Background(), kbClient, &restores.Items[i], podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
if first {
first = false
fmt.Print(s)
4 changes: 2 additions & 2 deletions pkg/cmd/cli/schedule/describe.go
@@ -53,8 +53,8 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}

first := true
for _, schedule := range schedules.Items {
s := output.DescribeSchedule(&schedule) //nolint
for i := range schedules.Items {
s := output.DescribeSchedule(&schedules.Items[i])
if first {
first = false
fmt.Print(s)
8 changes: 4 additions & 4 deletions pkg/cmd/server/server.go
@@ -939,7 +939,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
return
}

for _, backup := range backups.Items {
for i, backup := range backups.Items {
if backup.Status.Phase != velerov1api.BackupPhaseInProgress {
log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase)
continue
@@ -948,7 +948,7 @@
updated.Status.Phase = velerov1api.BackupPhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil { //nolint
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName())
continue
}
@@ -962,7 +962,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
log.WithError(errors.WithStack(err)).Error("failed to list restores")
return
}
for _, restore := range restores.Items {
for i, restore := range restores.Items {
if restore.Status.Phase != velerov1api.RestorePhaseInProgress {
log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase)
continue
@@ -971,7 +971,7 @@
updated.Status.Phase = velerov1api.RestorePhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil { //nolint
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName())
continue
}
12 changes: 6 additions & 6 deletions pkg/controller/backup_deletion_controller.go
@@ -325,11 +325,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
} else {
for _, restore := range restoreList.Items {
for i, restore := range restoreList.Items {
if restore.Spec.BackupName != backup.Name {
continue
}
restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore)) //nolint
restoreLog := log.WithField("restore", kube.NamespaceAndName(&restoreList.Items[i]))

restoreLog.Info("Deleting restore log/results from backup storage")
if err := backupStore.DeleteRestore(restore.Name); err != nil {
@@ -339,8 +339,8 @@ }
}

restoreLog.Info("Deleting restore referencing backup")
if err := r.Delete(ctx, &restore); err != nil { //nolint
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error()) //nolint
if err := r.Delete(ctx, &restoreList.Items[i]); err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restoreList.Items[i])).Error())
}
}
}
@@ -423,11 +423,11 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")}
}
var errs []error
for _, dbr := range dbrList.Items {
for i, dbr := range dbrList.Items {
if dbr.Name == req.Name {
continue
}
if err := r.Delete(ctx, &dbr); err != nil { //nolint
if err := r.Delete(ctx, &dbrList.Items[i]); err != nil {
errs = append(errs, errors.WithStack(err))
} else {
log.Infof("deletion request '%s' removed.", dbr.Name)
14 changes: 8 additions & 6 deletions pkg/controller/backup_sync_controller.go
@@ -317,13 +317,13 @@ func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locati
return
}

for _, backup := range backupList.Items {
for i, backup := range backupList.Items {
log = log.WithField("backup", backup.Name)
if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) {
continue
}

if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { //nolint
if err := b.client.Delete(ctx, &backupList.Items[i], &client.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
} else {
log.Debug("Deleted orphaned backup from cluster")
@@ -344,10 +344,10 @@ func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, b
if err := b.client.List(ctx, &vsList, listOptions); err != nil {
log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName)
} else {
for _, vs := range vsList.Items {
for i, vs := range vsList.Items {
name := kube.NamespaceAndName(vs.GetObjectMeta())
log.Debugf("Deleting volumesnapshot %s", name)
if err := b.client.Delete(context.TODO(), &vs); err != nil { //nolint
if err := b.client.Delete(context.TODO(), &vsList.Items[i]); err != nil {
log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name)
}
}
@@ -379,11 +379,13 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList {
bslArray = append(bslArray, &inputBSLList.Items[i])
// append everything before the default
for _, bsl := range inputBSLList.Items[:i] {
bslArray = append(bslArray, &bsl) //nolint
cpBsl := bsl
bslArray = append(bslArray, &cpBsl)
}
// append everything after the default
for _, bsl := range inputBSLList.Items[i+1:] {
bslArray = append(bslArray, &bsl) //nolint
cpBsl := bsl
bslArray = append(bslArray, &cpBsl)
}
meta.SetList(resultBSLList, bslArray)

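
An editorial aside on the hunk above: backupSyncSourceOrderFunc uses the other accepted remedy, copying the loop variable into a fresh local (cpBsl) before taking its address, rather than indexing into the slice. A short, self-contained sketch of that variant, with illustrative names:

// Sketch only: the copy-then-take-address variant, equivalent for read-only use.
package main

import "fmt"

func main() {
    names := []string{"default", "secondary"}

    var ptrs []*string
    for _, n := range names {
        cp := n                  // fresh variable on every iteration
        ptrs = append(ptrs, &cp) // each pointer refers to its own copy
    }
    fmt.Println(*ptrs[0], *ptrs[1]) // default secondary
}

The trade-off is small: &items[i] keeps a pointer into the original list, while copying detaches the pointer from it; for code that only reads the elements, as in the ordering helper above, either form resolves the aliasing warning.
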
6 changes: 3 additions & 3 deletions pkg/nodeagent/node_agent.go
@@ -61,12 +61,12 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, pod
return errors.Wrap(err, "failed to list daemonset pods")
}

for _, pod := range pods.Items {
if kube.IsPodRunning(&pod) != nil { //nolint
for i := range pods.Items {
if kube.IsPodRunning(&pods.Items[i]) != nil {
continue
}

if pod.Spec.NodeName == nodeName {
if pods.Items[i].Spec.NodeName == nodeName {
return nil
}
}
6 changes: 3 additions & 3 deletions pkg/restore/admissionwebhook_config_action.go
@@ -70,15 +70,15 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA
return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
}
newWebhooks := make([]interface{}, 0)
for i, entry := range webhooks {
for i := range webhooks {
logger2 := logger.WithField("index", i)
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry) //nolint
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhooks[i])
if err != nil {
logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err)
continue
}
s, _, _ := unstructured.NestedString(obj, "sideEffects")
if s != "None" && s != "NoneOnDryRun" { //nolint
if s != "None" && s != "NoneOnDryRun" {
logger2.Infof("reset the invalid sideEffects value '%s' to 'None'", s)
obj["sideEffects"] = "None"
}
6 changes: 3 additions & 3 deletions pkg/util/kube/utils.go
@@ -123,9 +123,9 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) {
var volume *corev1api.Volume

for _, item := range pod.Spec.Volumes {
if item.Name == volumeName {
volume = &item //nolint
for i := range pod.Spec.Volumes {
if pod.Spec.Volumes[i].Name == volumeName {
volume = &pod.Spec.Volumes[i]
break
}
}
