Skip to content

Commit

Permalink
Refactoring logging and metrics (#835)
Browse files Browse the repository at this point in the history
* Refactoring logging and metrics

* add utility functions that inject reconcile resource name into context

* Add injection package that handles all context injection
  • Loading branch information
felix-zhe-huang authored Nov 23, 2021
1 parent 568e82e commit 71ce076
Show file tree
Hide file tree
Showing 23 changed files with 127 additions and 99 deletions.
10 changes: 5 additions & 5 deletions cmd/controller/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ import (
"github.com/awslabs/karpenter/pkg/controllers/node"
"github.com/awslabs/karpenter/pkg/controllers/provisioning"
"github.com/awslabs/karpenter/pkg/controllers/termination"
"github.com/awslabs/karpenter/pkg/utils/injection"
"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/restconfig"
"github.com/go-logr/zapr"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
Expand All @@ -37,7 +37,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
"knative.dev/pkg/configmap/informer"
"knative.dev/pkg/injection"
knativeinjection "knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
Expand Down Expand Up @@ -67,8 +67,8 @@ func main() {

// Set up logger and watch for changes to log level
ctx := LoggingContextOrDie(config, clientSet)
ctx = restconfig.Inject(ctx, config)
ctx = options.Inject(ctx, opts)
ctx = injection.WithConfig(ctx, config)
ctx = injection.WithOptions(ctx, opts)

// Set up controller runtime controller
cloudProvider := registry.NewCloudProvider(ctx, cloudprovider.Options{ClientSet: clientSet})
Expand Down Expand Up @@ -99,7 +99,7 @@ func main() {
// LoggingContextOrDie injects a logger into the returned context. The logger is
// configured by the ConfigMap `config-logging` and live updates the level.
func LoggingContextOrDie(config *rest.Config, clientSet *kubernetes.Clientset) context.Context {
ctx, startinformers := injection.EnableInjectionOrDie(signals.NewContext(), config)
ctx, startinformers := knativeinjection.EnableInjectionOrDie(signals.NewContext(), config)
logger, atomicLevel := sharedmain.SetupLoggerOrDie(ctx, component)
ctx = logging.WithLogger(ctx, logger)
rest.SetDefaultWarningHandler(&logging.WarningHandler{Logger: logger})
Expand Down
9 changes: 5 additions & 4 deletions cmd/webhook/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,12 @@ import (
"github.com/awslabs/karpenter/pkg/apis"
"github.com/awslabs/karpenter/pkg/cloudprovider"
"github.com/awslabs/karpenter/pkg/cloudprovider/registry"
"github.com/awslabs/karpenter/pkg/utils/injection"
"github.com/awslabs/karpenter/pkg/utils/options"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
knativeinjection "knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
Expand All @@ -41,8 +42,8 @@ var (
)

func main() {
config := injection.ParseAndGetRESTConfigOrDie()
ctx := webhook.WithOptions(injection.WithNamespaceScope(signals.NewContext(), system.Namespace()), webhook.Options{
config := knativeinjection.ParseAndGetRESTConfigOrDie()
ctx := webhook.WithOptions(knativeinjection.WithNamespaceScope(signals.NewContext(), system.Namespace()), webhook.Options{
Port: opts.WebhookPort,
ServiceName: "karpenter-webhook",
SecretName: "karpenter-webhook-cert",
Expand Down Expand Up @@ -91,5 +92,5 @@ func newConfigValidationController(ctx context.Context, cmw configmap.Watcher) *
}

func InjectContext(ctx context.Context) context.Context {
return options.Inject(ctx, opts)
return injection.WithOptions(ctx, opts)
}
6 changes: 3 additions & 3 deletions pkg/cloudprovider/aws/apis/v1alpha1/provider_defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ import (

"github.com/awslabs/karpenter/pkg/apis/provisioning/v1alpha5"
"github.com/awslabs/karpenter/pkg/utils/functional"
"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/injection"
v1 "k8s.io/api/core/v1"
)

Expand All @@ -30,8 +30,8 @@ var ClusterDiscoveryTagKeyFormat = "kubernetes.io/cluster/%s"
func (c *Constraints) Default(ctx context.Context) {
c.defaultArchitecture()
c.defaultCapacityTypes()
c.defaultSubnets(options.Get(ctx).ClusterName)
c.defaultSecurityGroups(options.Get(ctx).ClusterName)
c.defaultSubnets(injection.GetOptions(ctx).ClusterName)
c.defaultSecurityGroups(injection.GetOptions(ctx).ClusterName)
}

func (c *Constraints) defaultCapacityTypes() {
Expand Down
4 changes: 2 additions & 2 deletions pkg/cloudprovider/aws/apis/v1alpha1/provider_validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ import (
"context"
"fmt"

"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/injection"
"knative.dev/pkg/apis"
)

Expand Down Expand Up @@ -75,7 +75,7 @@ func (a *AWS) validateSecurityGroups() (errs *apis.FieldError) {
func (a *AWS) validateTags(ctx context.Context) (errs *apis.FieldError) {
// Avoiding a check on number of tags (hard limit of 50) since that limit is shared by user
// defined and Karpenter tags, and the latter could change over time.
managedTags := ManagedTagsFor(options.Get(ctx).ClusterName)
managedTags := ManagedTagsFor(injection.GetOptions(ctx).ClusterName)
for tagKey, tagValue := range a.Tags {
if tagKey == "" {
errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf(
Expand Down
4 changes: 2 additions & 2 deletions pkg/cloudprovider/aws/instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ import (
"github.com/awslabs/karpenter/pkg/apis/provisioning/v1alpha5"
"github.com/awslabs/karpenter/pkg/cloudprovider"
"github.com/awslabs/karpenter/pkg/cloudprovider/aws/apis/v1alpha1"
"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/injection"
)

type InstanceProvider struct {
Expand Down Expand Up @@ -132,7 +132,7 @@ func (p *InstanceProvider) launchInstances(ctx context.Context, constraints *v1a
TagSpecifications: []*ec2.TagSpecification{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
Tags: v1alpha1.MergeTags(v1alpha1.ManagedTagsFor(options.Get(ctx).ClusterName), constraints.Tags),
Tags: v1alpha1.MergeTags(v1alpha1.ManagedTagsFor(injection.GetOptions(ctx).ClusterName), constraints.Tags),
},
},
// OnDemandOptions are allowed to be specified even when requesting spot
Expand Down
11 changes: 5 additions & 6 deletions pkg/cloudprovider/aws/launchtemplate.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,7 @@ import (
"github.com/awslabs/karpenter/pkg/cloudprovider"
"github.com/awslabs/karpenter/pkg/cloudprovider/aws/apis/v1alpha1"
"github.com/awslabs/karpenter/pkg/utils/functional"
"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/restconfig"
"github.com/awslabs/karpenter/pkg/utils/injection"
"github.com/mitchellh/hashstructure/v2"
core "k8s.io/api/core/v1"
"k8s.io/client-go/transport"
Expand Down Expand Up @@ -109,7 +108,7 @@ func (p *LaunchTemplateProvider) Get(ctx context.Context, constraints *v1alpha1.
// Ensure the launch template exists, or create it
launchTemplate, err := p.ensureLaunchTemplate(ctx, &launchTemplateOptions{
UserData: userData,
ClusterName: options.Get(ctx).ClusterName,
ClusterName: injection.GetOptions(ctx).ClusterName,
InstanceProfile: constraints.InstanceProfile,
AMIID: amiID,
SecurityGroupsIds: securityGroupsIds,
Expand Down Expand Up @@ -234,9 +233,9 @@ func (p *LaunchTemplateProvider) getUserData(ctx context.Context, constraints *v
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
/etc/eks/bootstrap.sh '%s' %s \
--apiserver-endpoint '%s'`,
options.Get(ctx).ClusterName,
injection.GetOptions(ctx).ClusterName,
containerRuntimeArg,
options.Get(ctx).ClusterEndpoint))
injection.GetOptions(ctx).ClusterEndpoint))
caBundle, err := p.GetCABundle(ctx)
if err != nil {
return "", fmt.Errorf("getting ca bundle for user data, %w", err)
Expand Down Expand Up @@ -290,7 +289,7 @@ func (p *LaunchTemplateProvider) GetCABundle(ctx context.Context) (*string, erro
// have used the simpler client-go InClusterConfig() method.
// However, that only works when Karpenter is running as a Pod
// within the same cluster it's managing.
restConfig := restconfig.Get(ctx)
restConfig := injection.GetConfig(ctx)
if restConfig == nil {
return nil, nil
}
Expand Down
6 changes: 3 additions & 3 deletions pkg/cloudprovider/aws/securitygroups.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/awslabs/karpenter/pkg/cloudprovider/aws/apis/v1alpha1"
"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/injection"
"github.com/mitchellh/hashstructure/v2"
"github.com/patrickmn/go-cache"
"knative.dev/pkg/logging"
Expand Down Expand Up @@ -105,7 +105,7 @@ func (s *SecurityGroupProvider) filterClusterTaggedGroups(ctx context.Context, s
if s.hasClusterTag(ctx, securityGroup) {
if foundClusterTag {
logging.FromContext(ctx).Debugf("Ignoring security group %s, only one group with tag %s is allowed", aws.StringValue(securityGroup.GroupId),
fmt.Sprint(v1alpha1.ClusterDiscoveryTagKeyFormat, options.Get(ctx).ClusterName))
fmt.Sprint(v1alpha1.ClusterDiscoveryTagKeyFormat, injection.GetOptions(ctx).ClusterName))
continue
}
foundClusterTag = true
Expand All @@ -117,7 +117,7 @@ func (s *SecurityGroupProvider) filterClusterTaggedGroups(ctx context.Context, s

func (s *SecurityGroupProvider) hasClusterTag(ctx context.Context, securityGroup *ec2.SecurityGroup) bool {
for _, tag := range securityGroup.Tags {
if aws.StringValue(tag.Key) == fmt.Sprintf(v1alpha1.ClusterDiscoveryTagKeyFormat, options.Get(ctx).ClusterName) {
if aws.StringValue(tag.Key) == fmt.Sprintf(v1alpha1.ClusterDiscoveryTagKeyFormat, injection.GetOptions(ctx).ClusterName) {
return true
}
}
Expand Down
3 changes: 2 additions & 1 deletion pkg/cloudprovider/aws/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"github.com/awslabs/karpenter/pkg/controllers/provisioning"
"github.com/awslabs/karpenter/pkg/test"
. "github.com/awslabs/karpenter/pkg/test/expectations"
"github.com/awslabs/karpenter/pkg/utils/injection"
"github.com/awslabs/karpenter/pkg/utils/options"
"github.com/awslabs/karpenter/pkg/utils/parallel"
"github.com/awslabs/karpenter/pkg/utils/resources"
Expand Down Expand Up @@ -59,7 +60,7 @@ func TestAPIs(t *testing.T) {

var _ = BeforeSuite(func() {
env = test.NewEnvironment(ctx, func(e *test.Environment) {
ctx = options.Inject(ctx, options.Options{ClusterName: "test-cluster", ClusterEndpoint: "https://test-cluster"})
ctx = injection.WithOptions(ctx, options.Options{ClusterName: "test-cluster", ClusterEndpoint: "https://test-cluster"})
launchTemplateCache = cache.New(CacheTTL, CacheCleanupInterval)
fakeEC2API = &fake.EC2API{}
subnetProvider := NewSubnetProvider(fakeEC2API)
Expand Down
2 changes: 1 addition & 1 deletion pkg/controllers/metrics/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import (
)

const (
controllerName = "Metrics"
controllerName = "metrics"

metricSubsystemCapacity = "capacity"
metricSubsystemPods = "pods"
Expand Down
3 changes: 1 addition & 2 deletions pkg/controllers/metrics/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ package metrics
import (
"context"
"fmt"
"strings"
"time"

"github.com/awslabs/karpenter/pkg/apis/provisioning/v1alpha5"
Expand Down Expand Up @@ -48,7 +47,7 @@ func NewController(kubeClient client.Client, cloudProvider cloudprovider.CloudPr
}

func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
loggerName := fmt.Sprintf("%s.provisioner/%s", strings.ToLower(controllerName), req.Name)
loggerName := fmt.Sprintf("%s.provisioner/%s", controllerName, req.Name)
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(loggerName))

// Does the provisioner exist?
Expand Down
6 changes: 3 additions & 3 deletions pkg/controllers/node/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ type Controller struct {

// Reconcile executes a reallocation control loop for the resource
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named("Node"))
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named("node").With("node", req.String()))
// 1. Retrieve Node, ignore if not provisioned or terminating
stored := &v1.Node{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, stored); err != nil {
Expand Down Expand Up @@ -102,7 +102,7 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reco
// 4. Patch any changes, regardless of errors
if !equality.Semantic.DeepEqual(node, stored) {
if err := c.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, fmt.Errorf("patching node %s, %w", node.Name, err)
return reconcile.Result{}, fmt.Errorf("patching node, %w", err)
}
}
// 5. Requeue if error or if retryAfter is set
Expand All @@ -115,7 +115,7 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reco
func (c *Controller) Register(ctx context.Context, m manager.Manager) error {
return controllerruntime.
NewControllerManagedBy(m).
Named("Node").
Named("node").
For(&v1.Node{}).
Watches(
// Reconcile all nodes related to a provisioner when it changes.
Expand Down
10 changes: 5 additions & 5 deletions pkg/controllers/node/emptiness.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisi
if !empty {
if hasEmptinessTimestamp {
delete(n.Annotations, v1alpha5.EmptinessTimestampAnnotationKey)
logging.FromContext(ctx).Infof("Removed emptiness TTL from node %s", n.Name)
logging.FromContext(ctx).Infof("Removed emptiness TTL from node")
}
return reconcile.Result{}, nil
}
Expand All @@ -64,7 +64,7 @@ func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisi
ttl := time.Duration(ptr.Int64Value(provisioner.Spec.TTLSecondsAfterEmpty)) * time.Second
if !hasEmptinessTimestamp {
n.Annotations[v1alpha5.EmptinessTimestampAnnotationKey] = injectabletime.Now().Format(time.RFC3339)
logging.FromContext(ctx).Infof("Added TTL to empty node %s", n.Name)
logging.FromContext(ctx).Infof("Added TTL to empty node")
return reconcile.Result{RequeueAfter: ttl}, nil
}
// 4. Delete node if beyond TTL
Expand All @@ -73,9 +73,9 @@ func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisi
return reconcile.Result{}, fmt.Errorf("parsing emptiness timestamp, %s", emptinessTimestamp)
}
if injectabletime.Now().After(emptinessTime.Add(ttl)) {
logging.FromContext(ctx).Infof("Triggering termination after %s for empty node %s", ttl, n.Name)
logging.FromContext(ctx).Infof("Triggering termination after %s for empty node", ttl)
if err := r.kubeClient.Delete(ctx, n); err != nil {
return reconcile.Result{}, fmt.Errorf("deleting node %s, %w", n.Name, err)
return reconcile.Result{}, fmt.Errorf("deleting node, %w", err)
}
}
return reconcile.Result{}, nil
Expand All @@ -84,7 +84,7 @@ func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisi
func (r *Emptiness) isEmpty(ctx context.Context, n *v1.Node) (bool, error) {
pods := &v1.PodList{}
if err := r.kubeClient.List(ctx, pods, client.MatchingFields{"spec.nodeName": n.Name}); err != nil {
return false, fmt.Errorf("listing pods for node %s, %w", n.Name, err)
return false, fmt.Errorf("listing pods for node, %w", err)
}
for i := range pods.Items {
p := pods.Items[i]
Expand Down
2 changes: 1 addition & 1 deletion pkg/controllers/node/expiration.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func (r *Expiration) Reconcile(ctx context.Context, provisioner *v1alpha5.Provis
expirationTTL := time.Duration(ptr.Int64Value(provisioner.Spec.TTLSecondsUntilExpired)) * time.Second
expirationTime := node.CreationTimestamp.Add(expirationTTL)
if injectabletime.Now().After(expirationTime) {
logging.FromContext(ctx).Infof("Triggering termination for expired node %s after %s (+%s)", node.Name, expirationTTL, time.Since(expirationTime))
logging.FromContext(ctx).Infof("Triggering termination for expired node after %s (+%s)", expirationTTL, time.Since(expirationTime))
if err := r.kubeClient.Delete(ctx, node); err != nil {
return reconcile.Result{}, fmt.Errorf("deleting node, %w", err)
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/controllers/node/liveness.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,9 +49,9 @@ func (r *Liveness) Reconcile(ctx context.Context, _ *v1alpha5.Provisioner, n *v1
if condition.Reason != "" && condition.Reason != "NodeStatusNeverUpdated" {
return reconcile.Result{}, nil
}
logging.FromContext(ctx).Infof("Triggering termination for node that failed to join %s", n.Name)
logging.FromContext(ctx).Infof("Triggering termination for node that failed to join")
if err := r.kubeClient.Delete(ctx, n); err != nil {
return reconcile.Result{}, fmt.Errorf("deleting node %s, %w", n.Name, err)
return reconcile.Result{}, fmt.Errorf("deleting node, %w", err)
}
return reconcile.Result{}, nil
}
6 changes: 4 additions & 2 deletions pkg/controllers/provisioning/binpacking/packer.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/awslabs/karpenter/pkg/controllers/provisioning/scheduling"
"github.com/awslabs/karpenter/pkg/metrics"
"github.com/awslabs/karpenter/pkg/utils/apiobject"
"github.com/awslabs/karpenter/pkg/utils/injection"
"github.com/awslabs/karpenter/pkg/utils/resources"
"github.com/mitchellh/hashstructure/v2"
"github.com/prometheus/client_golang/prometheus"
Expand All @@ -38,14 +39,15 @@ var (
// MaxInstanceTypes defines the number of instance type options to return to the cloud provider
MaxInstanceTypes = 20

packDuration = prometheus.NewHistogram(
packDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: metrics.Namespace,
Subsystem: "allocation_controller",
Name: "binpacking_duration_seconds",
Help: "Duration of binpacking process in seconds.",
Buckets: metrics.DurationBuckets(),
},
[]string{metrics.ProvisionerLabel},
)
)

Expand Down Expand Up @@ -74,7 +76,7 @@ type Packing struct {
// It follows the First Fit Decreasing bin packing technique, reference-
// https://en.wikipedia.org/wiki/Bin_packing_problem#First_Fit_Decreasing_(FFD)
func (p *Packer) Pack(ctx context.Context, schedule *scheduling.Schedule, instances []cloudprovider.InstanceType) []*Packing {
defer metrics.Measure(packDuration)()
defer metrics.Measure(packDuration.WithLabelValues(injection.GetNamespacedName(ctx).Name))()

// Sort pods in decreasing order by the amount of CPU requested, if
// CPU requested is equal compare memory requested.
Expand Down
3 changes: 3 additions & 0 deletions pkg/controllers/provisioning/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (
"github.com/awslabs/karpenter/pkg/controllers/provisioning/binpacking"
"github.com/awslabs/karpenter/pkg/controllers/provisioning/scheduling"
"github.com/awslabs/karpenter/pkg/utils/functional"
"github.com/awslabs/karpenter/pkg/utils/injection"
)

// Controller for the resource
Expand Down Expand Up @@ -60,6 +61,8 @@ func NewController(ctx context.Context, kubeClient client.Client, coreV1Client c
// Reconcile a control loop for the resource
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named("provisioning").With("provisioner", req.Name))
ctx = injection.WithNamespacedName(ctx, req.NamespacedName)

provisioner := &v1alpha5.Provisioner{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, provisioner); err != nil {
if errors.IsNotFound(err) {
Expand Down
Loading

0 comments on commit 71ce076

Please sign in to comment.