Update flags, improve test readability & use Bypass instead of ignore in naming
atwamahmoud committed Nov 22, 2023
1 parent 4635a6d commit a1ae4d3
Showing 7 changed files with 246 additions and 140 deletions.
9 changes: 2 additions & 7 deletions cluster-autoscaler/config/autoscaling_options.go
@@ -277,11 +277,6 @@ type AutoscalingOptions struct {
 	// dynamicNodeDeleteDelayAfterTaintEnabled is used to enable/disable dynamic adjustment of NodeDeleteDelayAfterTaint
 	// based on the latency between the CA and the api-server
 	DynamicNodeDeleteDelayAfterTaintEnabled bool
-	//IgnoreSchedulerProcessing is used to signal whether CA will/won't wait
-	//for scheduler to mark pods as unschedulable and will process both marked & non-marked pods
-	//it will also signal whether we enable/disable waiting for pod time buffers before triggering a scale-up.
-	IgnoreSchedulerProcessing bool
-	//IgnoredSchedulers are used to specify which schedulers to ignore their processing
-	//if IgnoreSchedulerProcessing is set to true
-	IgnoredSchedulers map[string]bool
+	// BypassedSchedulers specifies which schedulers' processing should be bypassed
+	BypassedSchedulers map[string]bool
 }
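
The renamed field also behaves as a feature switch: the set is empty unless the user lists schedulers to bypass. As a minimal sketch of how such a map could be built from a comma-separated flag value (the flag name and helper below are illustrative assumptions, not necessarily this commit's actual wiring):

package main

import (
	"fmt"
	"strings"
)

// parseBypassedSchedulers builds a BypassedSchedulers-style set from a
// comma-separated value such as "default-scheduler,my-custom-scheduler".
// Hypothetical helper for illustration only.
func parseBypassedSchedulers(flagValue string) map[string]bool {
	bypassed := make(map[string]bool)
	for _, name := range strings.Split(flagValue, ",") {
		if name = strings.TrimSpace(name); name != "" {
			bypassed[name] = true
		}
	}
	return bypassed
}

func main() {
	// Prints: map[default-scheduler:true my-custom-scheduler:true]
	fmt.Println(parseBypassedSchedulers("default-scheduler, my-custom-scheduler"))
}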
@@ -36,12 +36,14 @@ func NewFilterOutExpendablePodListProcessor() *filterOutExpendable {
 func (p *filterOutExpendable) Process(context *context.AutoscalingContext, pods []*apiv1.Pod) ([]*apiv1.Pod, error) {
 	nodes, err := context.AllNodeLister().List()
 	if err != nil {
+		klog.Warningf("Failed to list all nodes while filtering expendable: %v", err)
 		return nil, err
 	}
 	expendablePodsPriorityCutoff := context.AutoscalingOptions.ExpendablePodsPriorityCutoff
 
 	unschedulablePods, waitingForLowerPriorityPreemption := core_utils.FilterOutExpendableAndSplit(pods, nodes, expendablePodsPriorityCutoff)
 	if err = p.addPreemptingPodsToSnapshot(waitingForLowerPriorityPreemption, context); err != nil {
+		klog.Warningf("Failed to add preempting pods to snapshot: %v", err)
 		return nil, err
 	}
 
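For context on the call above, here is a rough, simplified sketch of the FilterOutExpendableAndSplit contract as used in this processor. The real core_utils helper also takes the node list; the version below only captures the split, and assumes apiv1 is "k8s.io/api/core/v1".

// Simplified sketch, not the real core_utils implementation: drop pods below
// the priority cutoff (expendable) and split the remainder into pods already
// waiting for a preemption victim versus genuinely unschedulable pods.
func filterOutExpendableAndSplit(pods []*apiv1.Pod, cutoff int) (unschedulable, waitingForPreemption []*apiv1.Pod) {
	for _, pod := range pods {
		if pod.Spec.Priority != nil && int(*pod.Spec.Priority) < cutoff {
			continue // expendable: may be preempted later, never scale up for it
		}
		if pod.Status.NominatedNodeName != "" {
			// The scheduler nominated a node and is preempting lower-priority
			// pods there; these are added to the snapshot, not scaled up for.
			waitingForPreemption = append(waitingForPreemption, pod)
		} else {
			unschedulable = append(unschedulable, pod)
		}
	}
	return unschedulable, waitingForPreemption
}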
9 changes: 5 additions & 4 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -310,8 +310,9 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	}
 	originalScheduledPods, unschedulablePods := kube_util.ScheduledPods(pods), kube_util.UnschedulablePods(pods)
 	schedulerUnprocessed := make([]*apiv1.Pod, 0, 0)
-	if a.IgnoreSchedulerProcessing {
-		schedulerUnprocessed = kube_util.SchedulerUnprocessedPods(pods, a.IgnoredSchedulers)
+	isSchedulerProcessingIgnored := len(a.BypassedSchedulers) > 0
+	if isSchedulerProcessingIgnored {
+		schedulerUnprocessed = kube_util.SchedulerUnprocessedPods(pods, a.BypassedSchedulers)
 	}
 
 	// Update cluster resource usage metrics
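
Note the flag change here: instead of a dedicated IgnoreSchedulerProcessing boolean, the feature is now on exactly when the BypassedSchedulers set is non-empty. A hedged sketch of what kube_util.SchedulerUnprocessedPods could look like under that contract; the real helper may differ (assumes apiv1 is "k8s.io/api/core/v1"):

// Hypothetical sketch of SchedulerUnprocessedPods: collect pods that belong to
// a bypassed scheduler and that the scheduler has not processed yet, i.e. no
// PodScheduled condition and no node assignment.
func schedulerUnprocessedPods(pods []*apiv1.Pod, bypassedSchedulers map[string]bool) []*apiv1.Pod {
	var unprocessed []*apiv1.Pod
	for _, pod := range pods {
		if !bypassedSchedulers[pod.Spec.SchedulerName] {
			continue // only bypass the schedulers explicitly listed
		}
		processed := pod.Spec.NodeName != ""
		for _, cond := range pod.Status.Conditions {
			if cond.Type == apiv1.PodScheduled {
				processed = true // the scheduler already looked at this pod
			}
		}
		if !processed {
			unprocessed = append(unprocessed, pod)
		}
	}
	return unprocessed
}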
@@ -456,7 +457,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr

 	// SchedulerUnprocessed might be zero here if it was disabled
 	metrics.UpdateUnschedulablePodsCount(len(unschedulablePods), len(schedulerUnprocessed))
-	if a.IgnoreSchedulerProcessing {
+	if isSchedulerProcessingIgnored {
 		// Treat unknown pods as unschedulable, pod list processor will remove schedulable pods
 		unschedulablePods = append(unschedulablePods, schedulerUnprocessed...)
 	}
@@ -543,7 +544,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	} else if a.MaxNodesTotal > 0 && len(readyNodes) >= a.MaxNodesTotal {
 		scaleUpStatus.Result = status.ScaleUpNoOptionsAvailable
 		klog.V(1).Info("Max total nodes in cluster reached")
-	} else if !a.IgnoreSchedulerProcessing && allPodsAreNew(unschedulablePodsToHelp, currentTime) {
+	} else if !isSchedulerProcessingIgnored && allPodsAreNew(unschedulablePodsToHelp, currentTime) {
 		// The assumption here is that these pods have been created very recently and probably there
 		// is more pods to come. In theory we could check the newest pod time but then if pod were created
 		// slowly but at the pace of 1 every 2 seconds then no scale up would be triggered for long time.
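
This branch is why bypassing matters for latency: normally the CA waits while all unschedulable pods are very young, batching a burst of creations into one scale-up, and bypassed schedulers skip that wait. A hedged sketch of such an allPodsAreNew-style check follows; the helper name matches the diff, but the buffer value and body are assumptions (assumes apiv1 is "k8s.io/api/core/v1" and the standard "time" package).

// Illustrative sketch of the allPodsAreNew check guarded above; the buffer
// value and implementation details are assumptions, not the exact code.
const unschedulablePodTimeBuffer = 2 * time.Second

func allPodsAreNew(pods []*apiv1.Pod, currentTime time.Time) bool {
	if len(pods) == 0 {
		return false
	}
	oldest := pods[0].CreationTimestamp.Time
	for _, pod := range pods[1:] {
		if created := pod.CreationTimestamp.Time; created.Before(oldest) {
			oldest = created
		}
	}
	// If even the oldest pending pod is younger than the buffer, hold off:
	// more pods are probably still coming and can share one scale-up attempt.
	return currentTime.Sub(oldest) < unschedulablePodTimeBuffer
}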