Reduce log spam in AtomicResizeFilteringProcessor #6509

Merged (1 commit) on Feb 13, 2024
@@ -20,6 +20,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
 	klog "k8s.io/klog/v2"
 )

@@ -83,6 +84,8 @@ type AtomicResizeFilteringProcessor struct {
 
 // GetNodesToRemove selects up to maxCount nodes for deletion, by selecting the first maxCount candidates
 func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.AutoscalingContext, candidates []simulator.NodeToBeRemoved, maxCount int) []simulator.NodeToBeRemoved {
+	atomicQuota := klogx.NodesLoggingQuota()
+	standardQuota := klogx.NodesLoggingQuota()
 	nodesByGroup := map[cloudprovider.NodeGroup][]simulator.NodeToBeRemoved{}
 	result := []simulator.NodeToBeRemoved{}
 	for _, node := range candidates {
@@ -97,13 +100,15 @@ func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.Autoscali
 			continue
 		}
 		if autoscalingOptions != nil && autoscalingOptions.ZeroOrMaxNodeScaling {
-			klog.V(2).Infof("Considering node %s for atomic scale down", node.Node.Name)
+			klogx.V(2).UpTo(atomicQuota).Infof("Considering node %s for atomic scale down", node.Node.Name)
 			nodesByGroup[nodeGroup] = append(nodesByGroup[nodeGroup], node)
 		} else {
-			klog.V(2).Infof("Considering node %s for standard scale down", node.Node.Name)
+			klogx.V(2).UpTo(standardQuota).Infof("Considering node %s for standard scale down", node.Node.Name)
 			result = append(result, node)
 		}
 	}
+	klogx.V(2).Over(atomicQuota).Infof("Considering %d other nodes for atomic scale down", -atomicQuota.Left())
+	klogx.V(2).Over(standardQuota).Infof("Considering %d other nodes for standard scale down", -standardQuota.Left())
 	for nodeGroup, nodes := range nodesByGroup {
 		ngSize, err := nodeGroup.TargetSize()
 		if err != nil {
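The change above swaps unbounded per-node klog lines for a klogx quota. Below is a rough usage sketch of that pattern, using only the klogx calls visible in this diff (NodesLoggingQuota, UpTo, Over, Left); the logCandidates helper, its package, and its argument type are illustrative rather than part of the PR:

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
)

// logCandidates logs the first few candidates individually and summarizes the rest.
func logCandidates(nodes []*apiv1.Node) {
	quota := klogx.NodesLoggingQuota() // 20 lines below -v=5, 1000 at -v>=5
	for _, node := range nodes {
		// Emitted verbatim until the quota runs out, then counted silently.
		klogx.V(2).UpTo(quota).Infof("Considering node %s for scale down", node.Name)
	}
	// Left() goes below zero once the quota is exhausted, so -quota.Left()
	// is the number of nodes that were not logged individually.
	klogx.V(2).Over(quota).Infof("Considering %d other nodes for scale down", -quota.Left())
}

The processor keeps two separate quotas so that the atomic and standard buckets do not consume each other's per-loop line budget.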
cluster-autoscaler/utils/klogx/defaults.go (14 additions, 0 deletions)
@@ -25,6 +25,12 @@ const (
 	// MaxPodsLoggedV5 is the maximum number of pods for which we will
 	// log detailed information every loop at verbosity >= 5.
 	MaxPodsLoggedV5 = 1000
+	// MaxNodesLogged is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity < 5.
+	MaxNodesLogged = 20
+	// MaxNodesLoggedV5 is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity >= 5.
+	MaxNodesLoggedV5 = 1000
 )

@@ -34,3 +40,11 @@ func PodsLoggingQuota() *Quota {
 	}
 	return NewLoggingQuota(MaxPodsLogged)
 }
+
+// NodesLoggingQuota returns a new quota with default limit for nodes at current verbosity.
+func NodesLoggingQuota() *Quota {
+	if klog.V(5).Enabled() {
+		return NewLoggingQuota(MaxNodesLoggedV5)
+	}
+	return NewLoggingQuota(MaxNodesLogged)
+}
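With these defaults, runs at -v>=5 keep roughly the old level of detail (up to 1000 nodes per quota), while lower verbosities are capped at 20 lines. An illustrative test-style sketch follows, assuming a fresh Quota starts with Left() equal to its configured limit; the test itself is hypothetical and not part of the PR:

package klogx

import "testing"

// Hypothetical check: the default node quota size tracks the klog verbosity level.
func TestNodesLoggingQuotaDefault(t *testing.T) {
	q := NodesLoggingQuota()
	// Below -v=5 the limit should be MaxNodesLogged (20); at -v>=5, MaxNodesLoggedV5 (1000).
	if left := q.Left(); left != MaxNodesLogged && left != MaxNodesLoggedV5 {
		t.Errorf("unexpected default node logging quota: %d", left)
	}
}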