Address govet linter errors when moving to golangci-lint 1.62.0
The predominant errors were flagged as:
printf: non-constant format string in call to fmt.Errorf

Signed-off-by: Shyamsundar Ranganathan <[email protected]>
ShyamsundarR committed Nov 26, 2024
1 parent 23f27ff commit a669424
Showing 4 changed files with 13 additions and 11 deletions.
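
For context, here is a minimal standalone sketch of the pattern govet flags and the two fixes applied across these files; the identifiers and messages below are illustrative, not taken from the repository:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	name := "drc-sample" // illustrative value

	// Flagged by govet's printf check: the format string handed to fmt.Errorf
	// is not a constant, so any stray %-verb in the message would be
	// misinterpreted at runtime.
	//   msg := fmt.Sprintf("missing ManifestWork resource %v", name)
	//   err := fmt.Errorf(msg)

	// Fix 1: pass a constant format string directly to fmt.Errorf.
	err1 := fmt.Errorf("missing ManifestWork resource %v", name)

	// Fix 2: when the message is already fully formatted, build the error
	// with errors.New instead of fmt.Errorf.
	msg := fmt.Sprintf("missing ManifestWork resource %v", name)
	err2 := errors.New(msg)

	fmt.Println(err1)
	fmt.Println(err2)
}
```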
internal/controller/drcluster_controller.go (4 changes: 2 additions & 2 deletions)
@@ -475,11 +475,11 @@ func (u *drclusterInstance) initializeStatus() {
 func (u *drclusterInstance) getDRClusterDeployedStatus(drcluster *ramen.DRCluster) error {
 	mw, err := u.mwUtil.GetDrClusterManifestWork(drcluster.Name)
 	if err != nil {
-		return fmt.Errorf(fmt.Sprintf("error in fetching DRCluster ManifestWork %v", err))
+		return fmt.Errorf("error in fetching DRCluster ManifestWork %v", err)
 	}
 
 	if mw == nil {
-		return fmt.Errorf(fmt.Sprintf("missing DRCluster ManifestWork resource %v", err))
+		return fmt.Errorf("missing DRCluster ManifestWork resource %v", err)
 	}
 
 	deployed := util.IsManifestInAppliedState(mw)
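The commit keeps the existing `%v` formatting here and only drops the redundant `fmt.Sprintf`. As a side note, wrapping with `%w` would additionally let callers unwrap the fetch error; a sketch under that assumption, using a stand-in `ManifestWork` type rather than the controller's real one:

```go
package main

import (
	"errors"
	"fmt"
)

// ManifestWork is a stand-in for the actual ManifestWork type used by the
// controller; only its presence or absence matters for this sketch.
type ManifestWork struct{}

// getDeployedStatus mirrors the shape of getDRClusterDeployedStatus above,
// but wraps the fetch error with %w instead of formatting it with %v.
func getDeployedStatus(fetch func() (*ManifestWork, error)) error {
	mw, err := fetch()
	if err != nil {
		return fmt.Errorf("error in fetching DRCluster ManifestWork: %w", err)
	}

	if mw == nil {
		return errors.New("missing DRCluster ManifestWork resource")
	}

	return nil
}

func main() {
	err := getDeployedStatus(func() (*ManifestWork, error) {
		return nil, errors.New("manifestwork not found")
	})

	fmt.Println(err)
	fmt.Println(errors.Unwrap(err) != nil) // true: callers can unwrap the cause
}
```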
internal/controller/drplacementcontrol.go (3 changes: 2 additions & 1 deletion)
@@ -339,7 +339,8 @@ func (d *DRPCInstance) RunFailover() (bool, error) {
 	const done = true
 
 	if d.instance.Spec.FailoverCluster == "" {
-		msg := "missing value for spec.FailoverCluster"
+		const msg = "missing value for spec.FailoverCluster"
+
 		addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
 			d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), msg)
 
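This hunk only promotes the fixed message to a Go constant. A minimal illustration of why that is safe and lint-friendly; the `report` helper below is a placeholder for addOrUpdateCondition, not the real function:

```go
package main

import "fmt"

// report is a placeholder for addOrUpdateCondition; it only needs a string value.
func report(msg string) {
	fmt.Println("condition message:", msg)
}

func main() {
	// Declaring the fixed message as an untyped string constant documents that
	// it never varies; it can still be passed wherever a string is expected,
	// and constants are always acceptable to govet as printf format strings.
	const msg = "missing value for spec.FailoverCluster"

	report(msg)
}
```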
internal/controller/volumereplicationgroup_controller.go (9 changes: 5 additions & 4 deletions)
@@ -5,6 +5,7 @@ package controllers
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	"time"
@@ -23,7 +24,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -414,7 +415,7 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct
 
 	// Fetch the VolumeReplicationGroup instance
 	if err := r.APIReader.Get(ctx, req.NamespacedName, v.instance); err != nil {
-		if errors.IsNotFound(err) {
+		if k8serrors.IsNotFound(err) {
 			log.Info("Resource not found")
 
 			return ctrl.Result{}, nil
@@ -867,7 +868,7 @@ func (v *VRGInstance) separatePVCUsingPeerClassAndSC(peerClasses []ramendrv1alph
 		msg := fmt.Sprintf("peerClass matching storageClass %s not found for async PVC", storageClass.GetName())
 		v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonPeerClassNotFound, msg)
 
-		return fmt.Errorf(msg)
+		return errors.New(msg)
 	}
 
 	pvcEnabledForVolSync := util.IsPVCMarkedForVolSync(v.instance.GetAnnotations())
@@ -1017,7 +1018,7 @@ func (v *VRGInstance) findPeerClassMatchingSC(
 		msg := fmt.Sprintf("peerClass matching storageClass %s not found for PVC", storageClass.GetName())
 		v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonPeerClassNotFound, msg)
 
-		return nil, fmt.Errorf(msg)
+		return nil, errors.New(msg)
 	}
 
 	if !slices.Contains(peerClass.StorageID, storageClass.GetLabels()[StorageIDLabel]) {
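Because this file now uses both the standard library errors package and the Kubernetes API error helpers, the commit aliases the latter to k8serrors. A standalone sketch of the same coexistence pattern; the group, resource, and object names below are illustrative:

```go
package main

import (
	"errors"
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// The alias lets the stdlib errors package and the Kubernetes API error
	// helpers coexist in one file without a name clash.
	notFound := k8serrors.NewNotFound(
		schema.GroupResource{Group: "ramendr.openshift.io", Resource: "volumereplicationgroups"},
		"vrg-sample")

	if k8serrors.IsNotFound(notFound) {
		fmt.Println("resource not found, nothing to reconcile")
	}

	// Pre-formatted messages that previously went through fmt.Errorf(msg)
	// become errors.New(msg), which has no format string for govet to flag.
	msg := fmt.Sprintf("peerClass matching storageClass %s not found for PVC", "sample-sc")
	fmt.Println(errors.New(msg))
}
```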
internal/controller/vrg_volrep.go (8 changes: 4 additions & 4 deletions)
@@ -635,13 +635,13 @@ func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim
 
 	if numProfilesUploaded != numProfilesToUpload {
 		// Merely defensive as we don't expect to reach here
-		msg := fmt.Sprintf("Uploaded PV/PVC cluster data to only %d of %d S3 profile(s): %v",
+		msg := fmt.Sprintf("uploaded PV/PVC cluster data to only %d of %d S3 profile(s): %v",
 			numProfilesUploaded, numProfilesToUpload, s3Profiles)
 		v.log.Info(msg)
 		v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name,
 			VRGConditionReasonUploadError, msg)
 
-		return fmt.Errorf(msg)
+		return errors.New(msg)
 	}
 
 	if err := v.addArchivedAnnotationForPVC(pvc, log); err != nil {
@@ -651,7 +651,7 @@ func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim
 		v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name,
 			VRGConditionReasonClusterDataAnnotationFailed, msg)
 
-		return fmt.Errorf(msg)
+		return errors.New(msg)
 	}
 
 	msg := fmt.Sprintf("Done uploading PV/PVC cluster data to %d of %d S3 profile(s): %v",
@@ -2128,7 +2128,7 @@ func (v *VRGInstance) checkPVClusterData(pvList []corev1.PersistentVolume) error
 			claimKey, prevPV.Name, thisPV.Name)
 		v.log.Info(msg)
 
-		return fmt.Errorf(msg)
+		return errors.New(msg)
 	}
 
 	return nil
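One hunk in this file also lowercases the message ("Uploaded" to "uploaded"), matching the Go convention that error strings start lowercase and carry no trailing punctuation, since callers often wrap them. A small standalone sketch of the resulting shape; the counts and profile names are made up:

```go
package main

import (
	"errors"
	"fmt"
)

// checkUploads mimics the bookkeeping around uploadPVandPVCtoS3Stores: it
// reports an error when fewer S3 profiles were uploaded to than expected.
func checkUploads(uploaded, wanted int, profiles []string) error {
	if uploaded == wanted {
		return nil
	}

	// Lowercase, punctuation-free message: idiomatic for Go errors, which are
	// routinely wrapped into longer messages by callers.
	msg := fmt.Sprintf("uploaded PV/PVC cluster data to only %d of %d S3 profile(s): %v",
		uploaded, wanted, profiles)

	// errors.New avoids handing govet a non-constant format string.
	return errors.New(msg)
}

func main() {
	if err := checkUploads(1, 2, []string{"s3-east", "s3-west"}); err != nil {
		fmt.Println("upload check failed:", err)
	}
}
```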
