Skip to content

Commit

Permalink
mend
Browse files Browse the repository at this point in the history
  • Loading branch information
damikag committed Jun 10, 2024
1 parent f920c28 commit ea2b801
Show file tree
Hide file tree
Showing 4 changed files with 74 additions and 13 deletions.
3 changes: 2 additions & 1 deletion cluster-autoscaler/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,7 @@ var (
writeStatusConfigMapFlag = flag.Bool("write-status-configmap", true, "Should CA write status information to a configmap")
statusConfigMapName = flag.String("status-config-map-name", "cluster-autoscaler-status", "Status configmap name")
maxInactivityTimeFlag = flag.Duration("max-inactivity", 10*time.Minute, "Maximum time from last recorded autoscaler activity before automatic restart")
maxBinpackingTimeFlag = flag.Duration("max-binpacking-time", 5*time.Minute, "Maximum time spend on binpacking for a single scale-up. If binpacking is limited by this, scale-up will continue with the already calculated scale-up options.")
maxFailingTimeFlag = flag.Duration("max-failing-time", 15*time.Minute, "Maximum time from last recorded successful autoscaler run before automatic restart")
balanceSimilarNodeGroupsFlag = flag.Bool("balance-similar-node-groups", false, "Detect similar node groups and balance the number of nodes between them")
nodeAutoprovisioningEnabled = flag.Bool("node-autoprovisioning-enabled", false, "Should CA autoprovision node groups when needed.This flag is deprecated and will be removed in future releases.")
Expand Down Expand Up @@ -488,7 +489,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter

opts.Processors = ca_processors.DefaultProcessors(autoscalingOptions)
opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime, *forceDaemonSets)
opts.Processors.BinpackingLimiter = binpacking.NewTimeLimiter(*maxInactivityTimeFlag/2, opts.Processors.BinpackingLimiter)
opts.Processors.BinpackingLimiter = binpacking.NewCombinedLimiter([]binpacking.BinpackingLimiter{opts.Processors.BinpackingLimiter, binpacking.NewTimeLimiter(*maxInactivityTimeFlag / 2)})
podListProcessor := podlistprocessor.NewDefaultPodListProcessor(opts.PredicateChecker)

if autoscalingOptions.ProvisioningRequestEnabled {
Expand Down
66 changes: 66 additions & 0 deletions cluster-autoscaler/processors/binpacking/combined_limiter.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package binpacking

import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/expander"
)

// CombinedLimiter combines the outcome of multiple limiters. It will limit
// binpacking when at least one limiter meets the stop condition.
type CombinedLimiter struct {
	// limiters holds the aggregated BinpackingLimiters; every method fans out
	// to each element in order.
	limiters []BinpackingLimiter
}

// NewCombinedLimiter returns a CombinedLimiter aggregating the given
// limiters. The input slice is copied, so later mutations of the caller's
// slice do not affect the returned limiter.
func NewCombinedLimiter(limiters []BinpackingLimiter) *CombinedLimiter {
	// Copy at the API boundary: slices are reference types, and sharing the
	// caller's backing array would let external code silently change the set
	// of limiters after construction.
	owned := append([]BinpackingLimiter(nil), limiters...)
	return &CombinedLimiter{
		limiters: owned,
	}
}

// InitBinpacking initialises each of the underlying limiters in turn.
func (l *CombinedLimiter) InitBinpacking(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup) {
	for i := range l.limiters {
		l.limiters[i].InitBinpacking(context, nodeGroups)
	}
}

// MarkProcessed marks the nodegroup as processed in each of the underlying
// limiters.
func (l *CombinedLimiter) MarkProcessed(context *context.AutoscalingContext, nodegroupId string) {
	for i := range l.limiters {
		l.limiters[i].MarkProcessed(context, nodegroupId)
	}
}

// StopBinpacking reports whether at least one of the underlying limiters has
// met its stop condition. Note that every limiter is queried on every call —
// there is deliberately no short-circuit once one limiter says stop, matching
// the non-short-circuiting evaluation of the original implementation.
func (l *CombinedLimiter) StopBinpacking(context *context.AutoscalingContext, evaluatedOptions []expander.Option) bool {
	shouldStop := false
	for i := range l.limiters {
		if l.limiters[i].StopBinpacking(context, evaluatedOptions) {
			shouldStop = true
		}
	}
	return shouldStop
}

// FinalizeBinpacking invokes FinalizeBinpacking on each of the underlying
// limiters.
func (l *CombinedLimiter) FinalizeBinpacking(context *context.AutoscalingContext, finalOptions []expander.Option) {
	for i := range l.limiters {
		l.limiters[i].FinalizeBinpacking(context, finalOptions)
	}
}
16 changes: 5 additions & 11 deletions cluster-autoscaler/processors/binpacking/time_limiter.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,43 +25,37 @@ import (
"k8s.io/klog/v2"
)

// TimeLimiter limits binpacking based on the total time spent on binpacking.
// NOTE(review): this span was a diff scrape with +/- markers lost; the removed
// pre-commit field `limiter BinpackingLimiter` and the old doc comment were
// interleaved with the new lines. Reconstructed to the post-commit form, in
// which TimeLimiter no longer wraps another limiter (composition now happens
// in CombinedLimiter). Also fixes the "imits"/"spends" typos.
type TimeLimiter struct {
	// startTime is recorded by InitBinpacking at the start of each run.
	startTime time.Time
	// maxBinpackingDuration is the wall-clock budget for a single run.
	maxBinpackingDuration time.Duration
}

// NewTimeLimiter returns an instance of a new TimeLimiter
func NewTimeLimiter(maxBinpackingDuration time.Duration, limiter BinpackingLimiter) *TimeLimiter {
// NewTimeLimiter returns an instance of a new TimeLimiter.
func NewTimeLimiter(maxBinpackingDuration time.Duration) *TimeLimiter {
return &TimeLimiter{
limiter: limiter,
maxBinpackingDuration: maxBinpackingDuration,
}
}

// InitBinpacking initialises the TimeLimiter by recording the start time of
// the current binpacking run.
// NOTE(review): the leftover `b.limiter.InitBinpacking(...)` line in this span
// was the deleted side of the diff (the wrapped-limiter field was removed in
// this commit); it is dropped in this reconstruction.
func (b *TimeLimiter) InitBinpacking(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup) {
	b.startTime = time.Now()
}

// MarkProcessed marks the nodegroup as processed. The TimeLimiter keeps no
// per-nodegroup state, so this is a no-op; the old delegation to a wrapped
// limiter (the `b.limiter.MarkProcessed` line) was removed by this commit.
func (b *TimeLimiter) MarkProcessed(context *context.AutoscalingContext, nodegroupId string) {
}

// StopBinpacking returns true once the time elapsed since InitBinpacking
// exceeds maxBinpackingDuration.
// NOTE(review): reconstructed from a garbled diff — the `b.limiter.StopBinpacking`
// call and `return stopCondition` were the deleted pre-commit lines; post-commit
// the method depends only on elapsed time and returns false otherwise.
func (b *TimeLimiter) StopBinpacking(context *context.AutoscalingContext, evaluatedOptions []expander.Option) bool {
	if time.Now().After(b.startTime.Add(b.maxBinpackingDuration)) {
		klog.Info("Binpacking is cut short due to maxBinpackingDuration reached.")
		return true
	}
	return false
}

// FinalizeBinpacking is called to finalize the BinpackingLimiter. The
// TimeLimiter has nothing to finalize, so this is a no-op; the old delegation
// to a wrapped limiter (`b.limiter.FinalizeBinpacking`) was removed by this
// commit.
func (b *TimeLimiter) FinalizeBinpacking(context *context.AutoscalingContext, finalOptions []expander.Option) {
}
2 changes: 1 addition & 1 deletion cluster-autoscaler/processors/processors.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ func DefaultProcessors(options config.AutoscalingOptions) *AutoscalingProcessors
return &AutoscalingProcessors{
PodListProcessor: pods.NewDefaultPodListProcessor(),
NodeGroupListProcessor: nodegroups.NewDefaultNodeGroupListProcessor(),
BinpackingLimiter: binpacking.NewTimeLimiter(options, binpacking.NewDefaultBinpackingLimiter()),
BinpackingLimiter: binpacking.NewDefaultBinpackingLimiter(),
NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{
MaxAllocatableDifferenceRatio: config.DefaultMaxAllocatableDifferenceRatio,
MaxCapacityMemoryDifferenceRatio: config.DefaultMaxCapacityMemoryDifferenceRatio,
Expand Down

0 comments on commit ea2b801

Please sign in to comment.