From b0cd570b042ebb466ec86b67cf66c203901ffb3a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?=
Date: Fri, 13 May 2022 16:41:10 +0200
Subject: [PATCH] Move handling unremovable nodes to dedicated object

---
 .../core/scaledown/legacy/legacy.go           | 134 ++++++------
 .../core/scaledown/legacy/legacy_test.go      |  11 +-
 .../core/scaledown/unremovable/nodes.go       | 106 ++++++++++++++
 .../core/scaledown/unremovable/nodes_test.go  | 112 +++++++++++++++
 4 files changed, 264 insertions(+), 99 deletions(-)
 create mode 100644 cluster-autoscaler/core/scaledown/unremovable/nodes.go
 create mode 100644 cluster-autoscaler/core/scaledown/unremovable/nodes_test.go

diff --git a/cluster-autoscaler/core/scaledown/legacy/legacy.go b/cluster-autoscaler/core/scaledown/legacy/legacy.go
index 566a6ee779fb..cf37e169bb93 100644
--- a/cluster-autoscaler/core/scaledown/legacy/legacy.go
+++ b/cluster-autoscaler/core/scaledown/legacy/legacy.go
@@ -28,6 +28,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
+	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable"
 	core_utils "k8s.io/autoscaler/cluster-autoscaler/core/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
 	"k8s.io/autoscaler/cluster-autoscaler/processors"
@@ -268,18 +269,17 @@ func (limits *scaleDownResourcesLimits) tryDecrementLimitsByDelta(delta scaleDow
 
 // ScaleDown is responsible for maintaining the state needed to perform unneeded node removals.
 type ScaleDown struct {
-	context                *context.AutoscalingContext
-	processors             *processors.AutoscalingProcessors
-	clusterStateRegistry   *clusterstate.ClusterStateRegistry
-	unneededNodes          map[string]time.Time
-	unneededNodesList      []*apiv1.Node
-	unremovableNodes       map[string]time.Time
-	podLocationHints       map[string]string
-	nodeUtilizationMap     map[string]utilization.Info
-	usageTracker           *simulator.UsageTracker
-	nodeDeletionTracker    *deletiontracker.NodeDeletionTracker
-	unremovableNodeReasons map[string]*simulator.UnremovableNode
-	removalSimulator       *simulator.RemovalSimulator
+	context              *context.AutoscalingContext
+	processors           *processors.AutoscalingProcessors
+	clusterStateRegistry *clusterstate.ClusterStateRegistry
+	unneededNodes        map[string]time.Time
+	unneededNodesList    []*apiv1.Node
+	unremovableNodes     *unremovable.Nodes
+	podLocationHints     map[string]string
+	nodeUtilizationMap   map[string]utilization.Info
+	usageTracker         *simulator.UsageTracker
+	nodeDeletionTracker  *deletiontracker.NodeDeletionTracker
+	removalSimulator     *simulator.RemovalSimulator
 }
 
 // NewScaleDown builds new ScaleDown object.
@@ -287,18 +287,17 @@ func NewScaleDown(context *context.AutoscalingContext, processors *processors.Au usageTracker := simulator.NewUsageTracker() removalSimulator := simulator.NewRemovalSimulator(context.ListerRegistry, context.ClusterSnapshot, context.PredicateChecker, usageTracker) return &ScaleDown{ - context: context, - processors: processors, - clusterStateRegistry: clusterStateRegistry, - unneededNodes: make(map[string]time.Time), - unremovableNodes: make(map[string]time.Time), - podLocationHints: make(map[string]string), - nodeUtilizationMap: make(map[string]utilization.Info), - usageTracker: usageTracker, - unneededNodesList: make([]*apiv1.Node, 0), - nodeDeletionTracker: deletiontracker.NewNodeDeletionTracker(0 * time.Second), - unremovableNodeReasons: make(map[string]*simulator.UnremovableNode), - removalSimulator: removalSimulator, + context: context, + processors: processors, + clusterStateRegistry: clusterStateRegistry, + unneededNodes: make(map[string]time.Time), + unremovableNodes: unremovable.NewNodes(), + podLocationHints: make(map[string]string), + nodeUtilizationMap: make(map[string]utilization.Info), + usageTracker: usageTracker, + unneededNodesList: make([]*apiv1.Node, 0), + nodeDeletionTracker: deletiontracker.NewNodeDeletionTracker(0 * time.Second), + removalSimulator: removalSimulator, } } @@ -307,7 +306,6 @@ func (sd *ScaleDown) CleanUp(timestamp time.Time) { // Use default ScaleDownUnneededTime as in this context the value // doesn't apply to any specific NodeGroup. sd.usageTracker.CleanUp(timestamp.Add(-sd.context.NodeGroupDefaults.ScaleDownUnneededTime)) - sd.clearUnremovableNodeReasons() } // CleanUpUnneededNodes clears the list of unneeded nodes. @@ -323,7 +321,7 @@ func (sd *ScaleDown) UnneededNodes() []*apiv1.Node { func (sd *ScaleDown) checkNodeUtilization(timestamp time.Time, node *apiv1.Node, nodeInfo *schedulerframework.NodeInfo) (simulator.UnremovableReason, *utilization.Info) { // Skip nodes that were recently checked. 
- if _, found := sd.unremovableNodes[node.Name]; found { + if sd.unremovableNodes.IsRecent(node.Name) { return simulator.RecentlyUnremovable, nil } @@ -394,7 +392,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( return errors.ToAutoscalerError(errors.InternalError, err) } - sd.updateUnremovableNodes(timestamp) + sd.unremovableNodes.Update(sd.context.ClusterSnapshot.NodeInfos(), timestamp) skipped := 0 utilizationMap := make(map[string]utilization.Info) @@ -406,7 +404,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( nodeInfo, err := sd.context.ClusterSnapshot.NodeInfos().Get(node.Name) if err != nil { klog.Errorf("Can't retrieve scale-down candidate %s from snapshot, err: %v", node.Name, err) - sd.addUnremovableNodeReason(node, simulator.UnexpectedError) + sd.unremovableNodes.AddReason(node, simulator.UnexpectedError) continue } @@ -420,7 +418,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( skipped++ } - sd.addUnremovableNodeReason(node, reason) + sd.unremovableNodes.AddReason(node, reason) continue } @@ -520,18 +518,17 @@ func (sd *ScaleDown) UpdateUnneededNodes( if len(unremovable) > 0 { unremovableTimeout := timestamp.Add(sd.context.AutoscalingOptions.UnremovableNodeRecheckTimeout) for _, unremovableNode := range unremovable { - sd.unremovableNodes[unremovableNode.Node.Name] = unremovableTimeout - sd.addUnremovableNode(unremovableNode) + sd.unremovableNodes.AddTimeout(unremovableNode, unremovableTimeout) } klog.V(1).Infof("%v nodes found to be unremovable in simulation, will re-check them at %v", len(unremovable), unremovableTimeout) } // This method won't always check all nodes, so let's give a generic reason for all nodes that weren't checked. for _, node := range scaleDownCandidates { - _, unremovableReasonProvided := sd.unremovableNodeReasons[node.Name] + unremovableReasonProvided := sd.unremovableNodes.HasReason(node.Name) _, unneeded := result[node.Name] if !unneeded && !unremovableReasonProvided { - sd.addUnremovableNodeReason(node, simulator.NotUnneededOtherReason) + sd.unremovableNodes.AddReason(node, simulator.NotUnneededOtherReason) } } @@ -576,59 +573,10 @@ func (sd *ScaleDown) isNodeBelowUtilizationThreshold(node *apiv1.Node, nodeGroup return true, nil } -// updateUnremovableNodes updates unremovableNodes map according to current -// state of the cluster. Removes from the map nodes that are no longer in the -// nodes list. -func (sd *ScaleDown) updateUnremovableNodes(timestamp time.Time) { - if len(sd.unremovableNodes) <= 0 { - return - } - newUnremovableNodes := make(map[string]time.Time, len(sd.unremovableNodes)) - for oldUnremovable, ttl := range sd.unremovableNodes { - if _, err := sd.context.ClusterSnapshot.NodeInfos().Get(oldUnremovable); err != nil { - // Not logging on error level as most likely cause is that node is no longer in the cluster. - klog.Infof("Can't retrieve node %s from snapshot, removing from unremovable map, err: %v", oldUnremovable, err) - continue - } - if ttl.After(timestamp) { - // Keep nodes that are still in the cluster and haven't expired yet. 
- newUnremovableNodes[oldUnremovable] = ttl - } - } - sd.unremovableNodes = newUnremovableNodes -} - -func (sd *ScaleDown) clearUnremovableNodeReasons() { - sd.unremovableNodeReasons = make(map[string]*simulator.UnremovableNode) -} - -func (sd *ScaleDown) addUnremovableNodeReason(node *apiv1.Node, reason simulator.UnremovableReason) { - sd.unremovableNodeReasons[node.Name] = &simulator.UnremovableNode{Node: node, Reason: reason, BlockingPod: nil} -} - -func (sd *ScaleDown) addUnremovableNode(unremovableNode *simulator.UnremovableNode) { - sd.unremovableNodeReasons[unremovableNode.Node.Name] = unremovableNode -} - -// UnremovableNodesCount returns a map of unremovable node counts per reason. -func (sd *ScaleDown) UnremovableNodesCount() map[simulator.UnremovableReason]int { - reasons := make(map[simulator.UnremovableReason]int) - - for _, node := range sd.unremovableNodeReasons { - reasons[node.Reason]++ - } - - return reasons -} - // UnremovableNodes returns a list of nodes that cannot be removed according to // the scale down algorithm. func (sd *ScaleDown) UnremovableNodes() []*simulator.UnremovableNode { - ns := make([]*simulator.UnremovableNode, 0, len(sd.unremovableNodeReasons)) - for _, n := range sd.unremovableNodeReasons { - ns = append(ns, n) - } - return ns + return sd.unremovableNodes.AsList() } // markSimulationError indicates a simulation error by clearing relevant scale @@ -735,7 +683,7 @@ func (sd *ScaleDown) TryToScaleDown( // Check if node is marked with no scale down annotation. if hasNoScaleDownAnnotation(node) { klog.V(4).Infof("Skipping %s - scale down disabled annotation found", node.Name) - sd.addUnremovableNodeReason(node, simulator.ScaleDownDisabledAnnotation) + sd.unremovableNodes.AddReason(node, simulator.ScaleDownDisabledAnnotation) continue } @@ -745,12 +693,12 @@ func (sd *ScaleDown) TryToScaleDown( nodeGroup, err := sd.context.CloudProvider.NodeGroupForNode(node) if err != nil { klog.Errorf("Error while checking node group for %s: %v", node.Name, err) - sd.addUnremovableNodeReason(node, simulator.UnexpectedError) + sd.unremovableNodes.AddReason(node, simulator.UnexpectedError) continue } if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() { klog.V(4).Infof("Skipping %s - no node group config", node.Name) - sd.addUnremovableNodeReason(node, simulator.NotAutoscaled) + sd.unremovableNodes.AddReason(node, simulator.NotAutoscaled) continue } @@ -762,7 +710,7 @@ func (sd *ScaleDown) TryToScaleDown( continue } if !unneededSince.Add(unneededTime).Before(currentTime) { - sd.addUnremovableNodeReason(node, simulator.NotUnneededLongEnough) + sd.unremovableNodes.AddReason(node, simulator.NotUnneededLongEnough) continue } } else { @@ -773,7 +721,7 @@ func (sd *ScaleDown) TryToScaleDown( continue } if !unneededSince.Add(unreadyTime).Before(currentTime) { - sd.addUnremovableNodeReason(node, simulator.NotUnreadyLongEnough) + sd.unremovableNodes.AddReason(node, simulator.NotUnreadyLongEnough) continue } } @@ -781,28 +729,28 @@ func (sd *ScaleDown) TryToScaleDown( size, found := nodeGroupSize[nodeGroup.Id()] if !found { klog.Errorf("Error while checking node group size %s: group size not found in cache", nodeGroup.Id()) - sd.addUnremovableNodeReason(node, simulator.UnexpectedError) + sd.unremovableNodes.AddReason(node, simulator.UnexpectedError) continue } deletionsInProgress := sd.nodeDeletionTracker.DeletionsCount(nodeGroup.Id()) if size-deletionsInProgress <= nodeGroup.MinSize() { klog.V(1).Infof("Skipping %s - node group min size reached", node.Name) - 
sd.addUnremovableNodeReason(node, simulator.NodeGroupMinSizeReached) + sd.unremovableNodes.AddReason(node, simulator.NodeGroupMinSizeReached) continue } scaleDownResourcesDelta, err := sd.computeScaleDownResourcesDelta(sd.context.CloudProvider, node, nodeGroup, resourcesWithLimits) if err != nil { klog.Errorf("Error getting node resources: %v", err) - sd.addUnremovableNodeReason(node, simulator.UnexpectedError) + sd.unremovableNodes.AddReason(node, simulator.UnexpectedError) continue } checkResult := scaleDownResourcesLeft.checkScaleDownDeltaWithinLimits(scaleDownResourcesDelta) if checkResult.exceeded { klog.V(4).Infof("Skipping %s - minimal limit exceeded for %v", node.Name, checkResult.exceededResources) - sd.addUnremovableNodeReason(node, simulator.MinimalResourceLimitExceeded) + sd.unremovableNodes.AddReason(node, simulator.MinimalResourceLimitExceeded) continue } @@ -851,7 +799,7 @@ func (sd *ScaleDown) TryToScaleDown( findNodesToRemoveDuration = time.Now().Sub(findNodesToRemoveStart) for _, unremovableNode := range unremovable { - sd.addUnremovableNode(unremovableNode) + sd.unremovableNodes.Add(unremovableNode) } if err != nil { diff --git a/cluster-autoscaler/core/scaledown/legacy/legacy_test.go b/cluster-autoscaler/core/scaledown/legacy/legacy_test.go index 189036007270..f4a90fee5bf1 100644 --- a/cluster-autoscaler/core/scaledown/legacy/legacy_test.go +++ b/cluster-autoscaler/core/scaledown/legacy/legacy_test.go @@ -39,6 +39,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker" + "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable" . "k8s.io/autoscaler/cluster-autoscaler/core/test" "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" @@ -169,15 +170,13 @@ func TestFindUnneededNodes(t *testing.T) { assert.False(t, found, n) } - sd.unremovableNodes = make(map[string]time.Time) + sd.unremovableNodes = unremovable.NewNodes() sd.unneededNodes["n1"] = time.Now() allNodes = []*apiv1.Node{n1, n2, n3, n4} simulator.InitializeClusterSnapshotOrDie(t, context.ClusterSnapshot, allNodes, []*apiv1.Pod{p1, p2, p3, p4}) autoscalererr = sd.UpdateUnneededNodes(allNodes, allNodes, time.Now(), nil) assert.NoError(t, autoscalererr) - sd.unremovableNodes = make(map[string]time.Time) - assert.Equal(t, 1, len(sd.unneededNodes)) addTime2, found := sd.unneededNodes["n2"] assert.True(t, found) @@ -191,7 +190,7 @@ func TestFindUnneededNodes(t *testing.T) { assert.False(t, found, n) } - sd.unremovableNodes = make(map[string]time.Time) + sd.unremovableNodes = unremovable.NewNodes() scaleDownCandidates := []*apiv1.Node{n1, n3, n4} simulator.InitializeClusterSnapshotOrDie(t, context.ClusterSnapshot, allNodes, []*apiv1.Pod{p1, p2, p3, p4}) autoscalererr = sd.UpdateUnneededNodes(allNodes, scaleDownCandidates, time.Now(), nil) @@ -207,7 +206,7 @@ func TestFindUnneededNodes(t *testing.T) { assert.Equal(t, 0, len(sd.unneededNodes)) // Verify that no other nodes are in unremovable map. - assert.Equal(t, 1, len(sd.unremovableNodes)) + assert.Equal(t, 1, len(sd.unremovableNodes.AsList())) // But it should be checked after timeout simulator.InitializeClusterSnapshotOrDie(t, context.ClusterSnapshot, allNodes, []*apiv1.Pod{}) @@ -216,7 +215,7 @@ func TestFindUnneededNodes(t *testing.T) { assert.Equal(t, 1, len(sd.unneededNodes)) // Verify that nodes that are no longer unremovable are removed. 
- assert.Equal(t, 0, len(sd.unremovableNodes)) + assert.Equal(t, 0, len(sd.unremovableNodes.AsList())) } func TestFindUnneededGPUNodes(t *testing.T) { diff --git a/cluster-autoscaler/core/scaledown/unremovable/nodes.go b/cluster-autoscaler/core/scaledown/unremovable/nodes.go new file mode 100644 index 000000000000..2b0292e9cdd8 --- /dev/null +++ b/cluster-autoscaler/core/scaledown/unremovable/nodes.go @@ -0,0 +1,106 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unremovable + +import ( + "time" + + "k8s.io/autoscaler/cluster-autoscaler/simulator" + + apiv1 "k8s.io/api/core/v1" + klog "k8s.io/klog/v2" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +// Nodes tracks the state of cluster nodes that cannot be removed. +type Nodes struct { + ttls map[string]time.Time + reasons map[string]*simulator.UnremovableNode +} + +// NewNodes returns a new initialized Nodes object. +func NewNodes() *Nodes { + return &Nodes{ + ttls: make(map[string]time.Time), + reasons: make(map[string]*simulator.UnremovableNode), + } +} + +// NodeInfoGetter is anything that can return NodeInfo object by name. +type NodeInfoGetter interface { + Get(name string) (*schedulerframework.NodeInfo, error) +} + +// Update updates the internal structure according to current state of the +// cluster. Removes the nodes that are no longer in the nodes list. +func (n *Nodes) Update(nodeInfos NodeInfoGetter, timestamp time.Time) { + n.reasons = make(map[string]*simulator.UnremovableNode) + if len(n.ttls) <= 0 { + return + } + newTTLs := make(map[string]time.Time, len(n.ttls)) + for name, ttl := range n.ttls { + if _, err := nodeInfos.Get(name); err != nil { + // Not logging on error level as most likely cause is that node is no longer in the cluster. + klog.Infof("Can't retrieve node %s from snapshot, removing from unremovable nodes, err: %v", name, err) + continue + } + if ttl.After(timestamp) { + // Keep nodes that are still in the cluster and haven't expired yet. + newTTLs[name] = ttl + } + } + n.ttls = newTTLs +} + +// Add adds an unremovable node. +func (n *Nodes) Add(node *simulator.UnremovableNode) { + n.reasons[node.Node.Name] = node +} + +// AddTimeout adds a new unremovable node with a timeout until which the node +// should be considered unremovable. +func (n *Nodes) AddTimeout(node *simulator.UnremovableNode, timeout time.Time) { + n.ttls[node.Node.Name] = timeout + n.Add(node) +} + +// AddReason adds an unremovable node due to the specified reason. +func (n *Nodes) AddReason(node *apiv1.Node, reason simulator.UnremovableReason) { + n.Add(&simulator.UnremovableNode{Node: node, Reason: reason, BlockingPod: nil}) +} + +// AsList returns a list of unremovable nodes. +func (n *Nodes) AsList() []*simulator.UnremovableNode { + ns := make([]*simulator.UnremovableNode, 0, len(n.reasons)) + for _, node := range n.reasons { + ns = append(ns, node) + } + return ns +} + +// HasReason returns true iff a given node has a reason to be unremovable. 
+func (n *Nodes) HasReason(nodeName string) bool { + _, found := n.reasons[nodeName] + return found +} + +// IsRecent returns true iff a given node was recently added +func (n *Nodes) IsRecent(nodeName string) bool { + _, found := n.ttls[nodeName] + return found +} diff --git a/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go b/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go new file mode 100644 index 000000000000..179ceac89ae0 --- /dev/null +++ b/cluster-autoscaler/core/scaledown/unremovable/nodes_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unremovable + +import ( + "fmt" + "testing" + "time" + + "k8s.io/autoscaler/cluster-autoscaler/simulator" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +var ( + beforeUpdate = time.UnixMilli(1652455130000) + updateTime = time.UnixMilli(1652455131111) + afterUpdate = time.UnixMilli(1652455132222) +) + +func TestUpdate(t *testing.T) { + testCases := map[string]struct { + unremovable map[string]time.Time + nodes []string + want int + }{ + "empty": {}, + "one removed via ttl": { + unremovable: map[string]time.Time{ + "n1": beforeUpdate, + "n2": afterUpdate, + }, + nodes: []string{"n1", "n2"}, + want: 1, + }, + "one missing from cluster": { + unremovable: map[string]time.Time{ + "n1": afterUpdate, + "n2": afterUpdate, + }, + nodes: []string{"n2"}, + want: 1, + }, + "all stay": { + unremovable: map[string]time.Time{ + "n1": afterUpdate, + "n2": afterUpdate, + }, + nodes: []string{"n1", "n2"}, + want: 2, + }, + } + for desc, tc := range testCases { + n := NewNodes() + niGetter := newFakeNodeInfoGetter(tc.nodes) + for name, timeout := range tc.unremovable { + n.AddTimeout(makeUnremovableNode(name), timeout) + } + n.Update(niGetter, updateTime) + got := len(n.ttls) + if got != tc.want { + t.Errorf("%s: got %d nodes, want %d", desc, got, tc.want) + } + } +} + +type fakeNodeInfoGetter struct { + names map[string]bool +} + +func (f *fakeNodeInfoGetter) Get(name string) (*schedulerframework.NodeInfo, error) { + // We don't actually care about the node info object itself, just its presence. + _, found := f.names[name] + if found { + return nil, nil + } + return nil, fmt.Errorf("not found") +} + +func newFakeNodeInfoGetter(ns []string) *fakeNodeInfoGetter { + names := make(map[string]bool, len(ns)) + for _, n := range ns { + names[n] = true + } + return &fakeNodeInfoGetter{names} +} + +func makeUnremovableNode(name string) *simulator.UnremovableNode { + return &simulator.UnremovableNode{ + Node: &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +}
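
Usage sketch (illustrative only, not part of the patch): the snippet below shows how the new unremovable.Nodes object is meant to be driven by the scale-down loop — per-iteration reasons recorded with AddReason, simulation results with a recheck timeout recorded with AddTimeout, and Update called once per UpdateUnneededNodes pass to drop expired or departed nodes and reset the reasons (which lines up with removing the clearUnremovableNodeReasons call from CleanUp). Only the exported API from nodes.go above is taken from the patch; the node names, the chosen reasons, the 5-minute timeout, and the clusterSnapshot placeholder are assumptions made for this example.

package main

import (
	"fmt"
	"time"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable"
	"k8s.io/autoscaler/cluster-autoscaler/simulator"
)

func main() {
	nodes := unremovable.NewNodes()
	now := time.Now()
	n1 := &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}
	n2 := &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n2"}}

	// Per-iteration reason: kept only until the next Update call resets the
	// reasons map.
	nodes.AddReason(n1, simulator.ScaleDownDisabledAnnotation)

	// Simulation result plus a recheck timeout: the entry stays in the TTL map
	// until Update drops it after the timeout passes, so checkNodeUtilization
	// keeps skipping the node via IsRecent in the meantime.
	nodes.AddTimeout(
		&simulator.UnremovableNode{Node: n2, Reason: simulator.NotUnneededOtherReason},
		now.Add(5*time.Minute))
	fmt.Println(nodes.IsRecent("n2")) // true

	// Once per loop: remove entries whose timeout expired or whose node left
	// the cluster, and clear the per-iteration reasons. clusterSnapshot stands
	// for anything whose NodeInfos() result satisfies the NodeInfoGetter interface.
	// nodes.Update(clusterSnapshot.NodeInfos(), time.Now())

	// Per-reason counts, equivalent to the removed ScaleDown.UnremovableNodesCount.
	counts := make(map[simulator.UnremovableReason]int)
	for _, un := range nodes.AsList() {
		counts[un.Reason]++
	}
	fmt.Println(counts)
}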