/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeinfosprovider

import (
	"reflect"

	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/core/utils"
	"k8s.io/autoscaler/cluster-autoscaler/simulator"
	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

	klog "k8s.io/klog/v2"
)

// MixedTemplateNodeInfoProvider builds NodeInfos from the cluster's nodes and node groups.
type MixedTemplateNodeInfoProvider struct {
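	// nodeInfoCache keeps, per node group id, the last NodeInfo built from a
	// real node. It serves as a fallback for groups that currently have no
	// ready, schedulable node.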
	nodeInfoCache map[string]*schedulerframework.NodeInfo
}

// NewMixedTemplateNodeInfoProvider returns a NodeInfoProvider processor building
// NodeInfos from real-world nodes when available, otherwise from node group templates.
func NewMixedTemplateNodeInfoProvider() *MixedTemplateNodeInfoProvider {
	return &MixedTemplateNodeInfoProvider{
		nodeInfoCache: make(map[string]*schedulerframework.NodeInfo),
	}
}
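
// A minimal usage sketch (assuming the caller has already populated nodes, an
// AutoscalingContext named autoscalingCtx, the daemonset list, and the
// ignored-taint set):
//
//	provider := NewMixedTemplateNodeInfoProvider()
//	nodeInfos, err := provider.Process(nodes, autoscalingCtx, daemonsets, ignoredTaints)
//	if err != nil {
//		klog.Errorf("building NodeInfos failed: %v", err)
//	}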

// CleanUp cleans up the processor's internal structures.
func (p *MixedTemplateNodeInfoProvider) CleanUp() {
}

// Process returns the NodeInfo set for this cluster, keyed by node group id.
func (p *MixedTemplateNodeInfoProvider) Process(nodes []*apiv1.Node, ctx *context.AutoscalingContext, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) {
	// TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
	// TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
	result := make(map[string]*schedulerframework.NodeInfo)
	seenGroups := make(map[string]bool)
	podsForNodes, err := getPodsForNodes(ctx.ListerRegistry)
	if err != nil {
		return map[string]*schedulerframework.NodeInfo{}, err
	}
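
	// Overall strategy: prefer NodeInfos built from real, ready nodes; fall
	// back to the per-group cache, then to provider templates, and finally to
	// unready/unschedulable nodes.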

	// processNode returns whether a template was generated for the node's
	// group, the group id if it was, and any error encountered.
	processNode := func(node *apiv1.Node) (bool, string, errors.AutoscalerError) {
		nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node)
		if err != nil {
			return false, "", errors.ToAutoscalerError(errors.CloudProviderError, err)
		}
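		// A cloud provider may return a typed-nil NodeGroup wrapped in a
		// non-nil interface value, so a plain nil check is not enough; the
		// reflect check below catches that case.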
		if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
			return false, "", nil
		}
		id := nodeGroup.Id()
		if _, found := result[id]; !found {
			// Build nodeInfo.
			nodeInfo, err := simulator.BuildNodeInfoForNode(node, podsForNodes)
			if err != nil {
				return false, "", err
			}
			sanitizedNodeInfo, err := utils.SanitizeNodeInfo(nodeInfo, id, ignoredTaints)
			if err != nil {
				return false, "", err
			}
			result[id] = sanitizedNodeInfo
			return true, id, nil
		}
		return false, "", nil
	}

	for _, node := range nodes {
		// Broken nodes might be missing data; skip them here, they are
		// handled as a last resort at the end.
		if !kube_util.IsNodeReadyAndSchedulable(node) {
			continue
		}
		added, id, typedErr := processNode(node)
		if typedErr != nil {
			return map[string]*schedulerframework.NodeInfo{}, typedErr
		}
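		// Cache a deep copy so that later mutation of the returned NodeInfo
		// does not leak into the fallback cache.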
		if added && p.nodeInfoCache != nil {
			if nodeInfoCopy, err := utils.DeepCopyNodeInfo(result[id]); err == nil {
				p.nodeInfoCache[id] = nodeInfoCopy
			}
		}
	}

	for _, nodeGroup := range ctx.CloudProvider.NodeGroups() {
		id := nodeGroup.Id()
		seenGroups[id] = true
		if _, found := result[id]; found {
			continue
		}
		// No good template, check cache of previously running nodes.
		if p.nodeInfoCache != nil {
			if nodeInfo, found := p.nodeInfoCache[id]; found {
				if nodeInfoCopy, err := utils.DeepCopyNodeInfo(nodeInfo); err == nil {
					result[id] = nodeInfoCopy
					continue
				}
			}
		}
		// Still no template, so try to generate one. This path is taken only
		// when a node group has no working nodes; by default CA prefers a
		// real-world example.
		nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonsets, ctx.PredicateChecker, ignoredTaints)
		if err != nil {
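			// A provider that doesn't support template nodes returns
			// ErrNotImplemented; that is expected, so the group is skipped
			// rather than treated as a failure.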
			if err == cloudprovider.ErrNotImplemented {
				continue
			} else {
				klog.Errorf("Unable to build proper template node for %s: %v", id, err)
				return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
			}
		}
		result[id] = nodeInfo
	}

	// Evict cached entries for node groups that no longer exist.
	for id := range p.nodeInfoCache {
		if _, ok := seenGroups[id]; !ok {
			delete(p.nodeInfoCache, id)
		}
	}

	// Last resort - unready/unschedulable nodes.
	for _, node := range nodes {
		// This time, broken nodes are allowed as a template source.
		if !kube_util.IsNodeReadyAndSchedulable(node) {
			added, _, typedErr := processNode(node)
			if typedErr != nil {
				return map[string]*schedulerframework.NodeInfo{}, typedErr
			}
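			// The node group is looked up again here only so its id can be
			// named in the warning below.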
			nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node)
			if err != nil {
				return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(
					errors.CloudProviderError, err)
			}
			if added {
				klog.Warningf("Built template for %s based on unready/unschedulable node %s", nodeGroup.Id(), node.Name)
			}
		}
	}
	return result, nil
}
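
// getPodsForNodes lists all scheduled pods and groups them by the name of the
// node they are assigned to.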
func getPodsForNodes(listers kube_util.ListerRegistry) (map[string][]*apiv1.Pod, errors.AutoscalerError) {
	pods, err := listers.ScheduledPodLister().List()
	if err != nil {
		return nil, errors.ToAutoscalerError(errors.ApiCallError, err)
	}
	podsForNodes := map[string][]*apiv1.Pod{}
	for _, p := range pods {
		podsForNodes[p.Spec.NodeName] = append(podsForNodes[p.Spec.NodeName], p)
	}
	return podsForNodes, nil
}