Added success logging to the provisioner #666

Merged
merged 1 commit on Sep 11, 2021
8 changes: 7 additions & 1 deletion pkg/controllers/allocation/controller.go
@@ -77,7 +77,11 @@ func NewController(kubeClient client.Client, coreV1Client corev1.CoreV1Interface
 
 // Reconcile executes an allocation control loop for the resource
 func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
-	ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named("Allocation"))
+	ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(fmt.Sprintf("allocation.provisioner/%s", req.Name)))
+	logging.FromContext(ctx).Infof("Starting provisioning loop")
+	defer func() {
+		logging.FromContext(ctx).Infof("Watching for pod events")
Contributor commented:
Just curious, I can't quite figure out why this is in a defer?

Contributor (author) replied:
This prints at the end of reconciliation. If the controller is hanging, the last log line will be "Watching for pod events". I could change this to something like "Waiting for pod events". The idea is to reassure users that Karpenter is working even if it's hanging. (A minimal sketch of this defer pattern follows the controller.go diff below.)

+	}()
 
 	// 1. Fetch provisioner
 	provisioner, err := c.provisionerFor(ctx, req.NamespacedName)
@@ -91,13 +95,15 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
 	}
 
 	// 2. Wait on a pod batch
+	logging.FromContext(ctx).Infof("Waiting to batch additional pods")
 	c.Batcher.Wait(provisioner)
 
 	// 3. Filter pods
 	pods, err := c.Filter.GetProvisionablePods(ctx, provisioner)
 	if err != nil {
 		return result.RetryIfError(ctx, fmt.Errorf("filtering pods, %w", err))
 	}
+	logging.FromContext(ctx).Infof("Found %d provisionable pods", len(pods))
 	if len(pods) == 0 {
 		return reconcile.Result{}, nil
 	}
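As an aside on the defer discussion in the review thread above, here is a minimal, self-contained sketch of that pattern. It is not Karpenter code: it uses the standard-library log package instead of the knative-style logger, and a hypothetical reconcileOnce helper standing in for one pass of Reconcile. It only illustrates why, once a pass completes and the controller goes quiet waiting for the next pod event, the last line in the log is the deferred "Watching for pod events".

package main

import (
	"log"
	"time"
)

// reconcileOnce is a hypothetical stand-in for one pass of the controller's
// Reconcile loop. The deferred statement runs when the function returns, so
// after a pass finishes, the most recent log line is "Watching for pod events".
func reconcileOnce(pass int) {
	log.Printf("Starting provisioning loop (pass %d)", pass)
	defer log.Printf("Watching for pod events (pass %d)", pass)

	// Simulated work; the real controller batches, filters, and schedules pods here.
	time.Sleep(50 * time.Millisecond)
}

func main() {
	reconcileOnce(1)
	reconcileOnce(2)
	// After the second pass returns, the final log line is the deferred
	// "Watching for pod events (pass 2)", signalling an idle-but-alive controller.
}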
5 changes: 0 additions & 5 deletions pkg/controllers/allocation/filter.go
@@ -38,10 +38,6 @@ func (f *Filter) GetProvisionablePods(ctx context.Context, provisioner *v1alpha3
 	if err := f.KubeClient.List(ctx, pods, client.MatchingFields{"spec.nodeName": ""}); err != nil {
 		return nil, fmt.Errorf("listing unscheduled pods, %w", err)
 	}
-	if len(pods.Items) == 0 {
-		return nil, nil
-	}
-
 	// 2. Filter pods that aren't provisionable
 	provisionable := []*v1.Pod{}
 	for _, p := range pods.Items {
@@ -53,7 +49,6 @@ }
 		}
 		provisionable = append(provisionable, ptr.Pod(p))
 	}
-	logging.FromContext(ctx).Infof("Found %d provisionable pods", len(provisionable))
 	return provisionable, nil
 }
