Merge pull request #1217 from fluxcd/remove-event-error
Remove Event error
darkowlzz authored Sep 15, 2023
2 parents 8aa917d + 5a92e8b commit 617cfb2
Showing 10 changed files with 212 additions and 250 deletions.
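The refactor is mechanical: call sites that previously constructed `&serror.Event{Err: ..., Reason: ...}` now go through the `serror.NewGeneric(err, reason)` constructor, and failed `sp.Patch` calls that used to return the bare error are wrapped with the new `sourcev1.PatchOperationFailedReason`. The standalone sketch below models the shape of that change under the assumption that `NewGeneric` simply bundles an error with a reason string; the `Generic` type here is a simplified stand-in for illustration, not the repository's actual `internal/error` package.

```go
package main

import (
	"errors"
	"fmt"
)

// Generic is a simplified stand-in for the serror error type referenced in
// the diff: an underlying error plus a reason string that the controllers
// surface on status conditions and events.
type Generic struct {
	Err    error
	Reason string
}

func (e *Generic) Error() string { return e.Err.Error() }

// NewGeneric mirrors the constructor the refactor switches to: instead of
// call sites filling in a struct literal (&serror.Event{Err: ..., Reason: ...}),
// they pass the error and the reason positionally.
func NewGeneric(err error, reason string) *Generic {
	return &Generic{Err: err, Reason: reason}
}

func main() {
	patchErr := errors.New("conflict while patching object")

	// Before (shape of the removed code):
	//   return sreconcile.ResultEmpty, err
	// After (shape of the added code):
	//   return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
	e := NewGeneric(patchErr, "PatchOperationFailed")
	fmt.Printf("%s: %v\n", e.Reason, e)
}
```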
4 changes: 4 additions & 0 deletions api/v1/condition_types.go
@@ -104,4 +104,8 @@ const (

// CacheOperationFailedReason signals a failure in cache operation.
CacheOperationFailedReason string = "CacheOperationFailed"

// PatchOperationFailedReason signals a failure in patching a kubernetes API
// object.
PatchOperationFailedReason string = "PatchOperationFailed"
)
90 changes: 45 additions & 45 deletions internal/controller/bucket_controller.go
@@ -200,7 +200,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(),
summarize.WithProcessors(
summarize.RecordContextualError,
summarize.ErrorActionHandler,
summarize.RecordReconcileReq,
),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{
@@ -268,21 +268,21 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
case recAtVal != obj.Status.GetLastHandledReconcileRequest():
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
}

// Create temp working dir
tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name))
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create temporary working directory: %w", err),
Reason: sourcev1.DirCreationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to create temporary working directory: %w", err),
sourcev1.DirCreationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
@@ -402,7 +402,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg)
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
return sreconcile.ResultSuccess, nil
}
@@ -423,7 +423,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
secret, err := r.getBucketSecret(ctx, obj)
if err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
// Return error as the world as observed may change
return sreconcile.ResultEmpty, e
@@ -434,42 +434,42 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
switch obj.Spec.Provider {
case bucketv1.GoogleBucketProvider:
if err = gcp.ValidateSecret(secret); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
if provider, err = gcp.NewClient(ctx, secret); err != nil {
e := &serror.Event{Err: err, Reason: "ClientError"}
e := serror.NewGeneric(err, "ClientError")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
case bucketv1.AzureBucketProvider:
if err = azure.ValidateSecret(secret); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
if provider, err = azure.NewClient(obj, secret); err != nil {
e := &serror.Event{Err: err, Reason: "ClientError"}
e := serror.NewGeneric(err, "ClientError")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
default:
if err = minio.ValidateSecret(secret); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
if provider, err = minio.NewClient(obj, secret); err != nil {
e := &serror.Event{Err: err, Reason: "ClientError"}
e := serror.NewGeneric(err, "ClientError")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
}

// Fetch etag index
if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil {
e := &serror.Event{Err: err, Reason: bucketv1.BucketOperationFailedReason}
e := serror.NewGeneric(err, bucketv1.BucketOperationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
@@ -501,7 +501,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
}()

if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil {
e := &serror.Event{Err: err, Reason: bucketv1.BucketOperationFailedReason}
e := serror.NewGeneric(err, bucketv1.BucketOperationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
@@ -550,45 +550,45 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri

// Ensure target path exists and is a directory
if f, err := os.Stat(dir); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to stat source path: %w", err),
Reason: sourcev1.StatOperationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to stat source path: %w", err),
sourcev1.StatOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
} else if !f.IsDir() {
e := &serror.Event{
Err: fmt.Errorf("source path '%s' is not a directory", dir),
Reason: sourcev1.InvalidPathReason,
}
e := serror.NewGeneric(
fmt.Errorf("source path '%s' is not a directory", dir),
sourcev1.InvalidPathReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}

// Ensure artifact directory exists and acquire lock
if err := r.Storage.MkdirAll(artifact); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create artifact directory: %w", err),
Reason: sourcev1.DirCreationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to create artifact directory: %w", err),
sourcev1.DirCreationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
unlock, err := r.Storage.Lock(artifact)
if err != nil {
return sreconcile.ResultEmpty, &serror.Event{
Err: fmt.Errorf("failed to acquire lock for artifact: %w", err),
Reason: meta.FailedReason,
}
return sreconcile.ResultEmpty, serror.NewGeneric(
fmt.Errorf("failed to acquire lock for artifact: %w", err),
meta.FailedReason,
)
}
defer unlock()

// Archive directory to storage
if err := r.Storage.Archive(&artifact, dir, nil); err != nil {
e := &serror.Event{
Err: fmt.Errorf("unable to archive artifact to storage: %s", err),
Reason: sourcev1.ArchiveOperationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("unable to archive artifact to storage: %s", err),
sourcev1.ArchiveOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
@@ -635,10 +635,10 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bu
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *bucketv1.Bucket) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection for deleted resource failed: %s", err),
Reason: "GarbageCollectionFailed",
}
return serror.NewGeneric(
fmt.Errorf("garbage collection for deleted resource failed: %s", err),
"GarbageCollectionFailed",
)
} else if deleted != "" {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
"garbage collected artifacts for deleted resource")
@@ -649,10 +649,10 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *bucketv1.Buc
if obj.GetArtifact() != nil {
delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
if err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection of artifacts failed: %w", err),
Reason: "GarbageCollectionFailed",
}
return serror.NewGeneric(
fmt.Errorf("garbage collection of artifacts failed: %w", err),
"GarbageCollectionFailed",
)
}
if len(delFiles) > 0 {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
20 changes: 11 additions & 9 deletions internal/controller/gitrepository_controller.go
@@ -264,11 +264,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
case recAtVal != obj.Status.GetLastHandledReconcileRequest():
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
}

@@ -425,7 +425,7 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg)
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
return sreconcile.ResultSuccess, nil
}
@@ -527,7 +527,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
}
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
}
conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
@@ -561,6 +561,8 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
)
ge.Notification = false
ge.Ignore = true
// Log it as this will not be passed to the runtime.
ge.Log = true
ge.Event = corev1.EventTypeNormal
// Remove any stale fetch failed condition.
conditions.Delete(obj, sourcev1.FetchFailedCondition)
@@ -599,7 +601,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
}
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, err
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
}
return sreconcile.ResultSuccess, nil
@@ -815,10 +817,10 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc

// Copy artifact (sub)contents to configured directory.
if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
Reason: "CopyFailure",
}
e := serror.NewGeneric(
fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
"CopyFailure",
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
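One hunk in gitrepository_controller.go also adjusts an existing generic-error path: the error remains a non-notification, ignored error, but is now explicitly flagged for logging (`ge.Log = true`), since, per the added comment, it will not be passed to the runtime. The sketch below is a simplified, self-contained model of how such behaviour flags can drive an error-action handler; the field names mirror the assignments in the diff, but the handler itself is illustrative and not the actual `summarize.ErrorActionHandler` processor.

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// genericError models an error carrying flags that tell the caller how to
// act on it, mirroring the ge.Notification / ge.Ignore / ge.Log / ge.Event
// assignments in the diff. The type is illustrative, not the repository's
// serror implementation.
type genericError struct {
	Err          error
	Reason       string
	Notification bool   // emit an event/notification for this error?
	Ignore       bool   // treat as benign rather than a reconcile failure?
	Log          bool   // write it to the controller log?
	Event        string // event type to use if a notification is emitted
}

func (e *genericError) Error() string { return e.Err.Error() }

// handleErrorAction is a simplified stand-in for an error-action handler:
// it inspects the flags and performs only the requested side effects.
func handleErrorAction(e *genericError) {
	if e.Log {
		log.Printf("%s: %v", e.Reason, e.Err)
	}
	if e.Notification {
		fmt.Printf("would emit %s event: %v\n", e.Event, e.Err)
	}
	if e.Ignore {
		fmt.Println("error ignored; reconciliation continues as successful")
	}
}

func main() {
	ge := &genericError{
		Err:    errors.New("no changes since last reconciliation"), // illustrative message
		Reason: "UpToDate",                                         // illustrative reason
	}
	// Mirror the flag choices made in the diff for this case.
	ge.Notification = false
	ge.Ignore = true
	ge.Log = true // log it, as it will not be passed to the runtime
	ge.Event = "Normal"

	handleErrorAction(ge)
}
```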