Skip to content

Commit

Permalink
Removed custom label generation. Refactored scheduling logic in preparation for multiple provisioner selection
Browse files Browse the repository at this point in the history
  • Loading branch information
ellistarn committed Nov 10, 2021
1 parent fb06300 commit 4c1055e
Show file tree
Hide file tree
Showing 14 changed files with 337 additions and 284 deletions.
73 changes: 73 additions & 0 deletions pkg/apis/provisioning/v1alpha5/constraints.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha5

import (
"fmt"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)

// Constraints are applied to all nodes created by the provisioner. Labels and
// Requirements are layered together to describe permitted node properties;
// Taints restrict which pods may schedule to the resulting nodes.
type Constraints struct {
	// Labels are layered with Requirements and applied to every node.
	// +optional
	Labels map[string]string `json:"labels,omitempty"`
	// Taints will be applied to every node launched by the Provisioner. If
	// specified, the provisioner will not provision nodes for pods that do not
	// have matching tolerations. Additional taints will be created that match
	// pod tolerations on a per-node basis.
	// +optional
	Taints Taints `json:"taints,omitempty"`
	// Requirements are layered with Labels and applied to every node.
	Requirements Requirements `json:"requirements,omitempty"`
	// Provider contains fields specific to your cloudprovider.
	// +kubebuilder:pruning:PreserveUnknownFields
	Provider *runtime.RawExtension `json:"provider,omitempty"`
}

// Supports returns nil if the pod's scheduling requirements (tolerations plus
// the requirements derived from its node selector and node affinity) are
// compatible with these constraints, or an error describing the first
// incompatibility found.
func (c *Constraints) Supports(pod *v1.Pod) error {
	// The pod must tolerate the constraint taints.
	if err := c.Taints.Tolerates(pod); err != nil {
		return err
	}
	// Every requirement derived from the pod must be satisfiable by the
	// constraint's resolved value set for the same key.
	for _, requirement := range PodRequirements(pod) {
		values := c.Requirements.Requirement(requirement.Key)
		switch requirement.Operator {
		case v1.NodeSelectorOpIn:
			// At least one of the pod's permitted values must be allowed.
			if !values.HasAny(requirement.Values...) {
				return fmt.Errorf("requirement %s not met, %s not in %v", requirement.Key, requirement.Values, values.UnsortedList())
			}
		case v1.NodeSelectorOpNotIn:
			// None of the pod's excluded values may be allowed.
			if values.HasAny(requirement.Values...) {
				return fmt.Errorf("requirement %s not met, %v must not be in %v", requirement.Key, requirement.Values, values.UnsortedList())
			}
		}
	}
	return nil
}

// Tighten returns a copy of the constraints narrowed by the pod's scheduling
// requirements: the pod's node selector and node affinity terms are folded
// into Requirements (keeping only well known keys after consolidation), and
// additional taints matching the pod's tolerations are derived.
func (c *Constraints) Tighten(pod *v1.Pod) *Constraints {
	requirements := c.Requirements.
		With(PodRequirements(pod)).
		Consolidate().
		WellKnown()
	return &Constraints{
		Labels:       c.Labels,
		Requirements: requirements,
		Taints:       c.Taints.WithPod(pod),
		Provider:     c.Provider,
	}
}
153 changes: 0 additions & 153 deletions pkg/apis/provisioning/v1alpha5/provisioner.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,7 @@ limitations under the License.
package v1alpha5

import (
"sort"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)

// ProvisionerSpec is the top level provisioner specification. Provisioners
Expand Down Expand Up @@ -53,27 +48,6 @@ type ProvisionerSpec struct {
TTLSecondsUntilExpired *int64 `json:"ttlSecondsUntilExpired,omitempty"`
}

// Constraints are applied to all nodes created by the provisioner.
type Constraints struct {
// Labels are layered with Requirements and applied to every node.
//+optional
Labels map[string]string `json:"labels,omitempty"`
// Taints will be applied to every node launched by the Provisioner. If
// specified, the provisioner will not provision nodes for pods that do not
// have matching tolerations. Additional taints will be created that match
// pod tolerations on a per-node basis.
// +optional
Taints []v1.Taint `json:"taints,omitempty"`
// Requirements are layered with Labels and applied to every node.
Requirements Requirements `json:"requirements,omitempty"`
// Provider contains fields specific to your cloudprovider.
// +kubebuilder:pruning:PreserveUnknownFields
Provider *runtime.RawExtension `json:"provider,omitempty"`
}

// Requirements is a decorated alias type for []v1.NodeSelectorRequirements
type Requirements []v1.NodeSelectorRequirement

// Provisioner is the Schema for the Provisioners API
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=provisioners,scope=Cluster
Expand All @@ -93,130 +67,3 @@ type ProvisionerList struct {
metav1.ListMeta `json:"metadata,omitempty"`
Items []Provisioner `json:"items"`
}

func (r Requirements) Zones() sets.String {
return r.Requirement(v1.LabelTopologyZone)
}

func (r Requirements) InstanceTypes() sets.String {
return r.Requirement(v1.LabelInstanceTypeStable)
}

func (r Requirements) Architectures() sets.String {
return r.Requirement(v1.LabelArchStable)
}

func (r Requirements) OperatingSystems() sets.String {
return r.Requirement(v1.LabelOSStable)
}

func (r Requirements) CapacityTypes() sets.String {
return r.Requirement(LabelCapacityType)
}

func (r Requirements) WithProvisioner(provisioner Provisioner) Requirements {
return r.
With(provisioner.Spec.Requirements).
WithLabels(provisioner.Spec.Labels).
WithLabels(map[string]string{ProvisionerNameLabelKey: provisioner.Name})
}

func (r Requirements) With(requirements Requirements) Requirements {
return append(r, requirements...)
}

func (r Requirements) WithLabels(labels map[string]string) Requirements {
for key, value := range labels {
r = append(r, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}})
}
return r
}

func (r Requirements) WithPod(pod *v1.Pod) Requirements {
for key, value := range pod.Spec.NodeSelector {
r = append(r, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}})
}
if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil {
return r
}
// Select heaviest preference and treat as a requirement. An outer loop will iteratively unconstrain them if unsatisfiable.
if preferred := pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution; len(preferred) > 0 {
sort.Slice(preferred, func(i int, j int) bool { return preferred[i].Weight > preferred[j].Weight })
r = append(r, preferred[0].Preference.MatchExpressions...)
}
// Select first requirement. An outer loop will iteratively remove OR requirements if unsatisfiable
if pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 {
r = append(r, pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...)
}
return r
}

// Consolidate combines In and NotIn requirements for each unique key, producing
// an equivalent minimal representation of the requirements. This is useful as
// requirements may be appended from a variety of sources and then consolidated.
// Caution: If a key contains a `NotIn` operator without a corresponding
// `In` operator, the requirement will permanently be [] after consolidation. To
// avoid this, include the broadest `In` requirements before consolidating.
func (r Requirements) Consolidate() (requirements Requirements) {
for _, key := range r.Keys() {
requirements = append(requirements, v1.NodeSelectorRequirement{
Key: key,
Operator: v1.NodeSelectorOpIn,
Values: r.Requirement(key).UnsortedList(),
})
}
return requirements
}

func (r Requirements) CustomLabels() map[string]string {
labels := map[string]string{}
for _, key := range r.Keys() {
if !WellKnownLabels.Has(key) {
if requirement := r.Requirement(key); len(requirement) > 0 {
labels[key] = requirement.UnsortedList()[0]
}
}
}
return labels
}

func (r Requirements) WellKnown() (requirements Requirements) {
for _, requirement := range r {
if WellKnownLabels.Has(requirement.Key) {
requirements = append(requirements, requirement)
}
}
return requirements
}

// Keys returns unique set of the label keys from the requirements
func (r Requirements) Keys() []string {
keys := sets.NewString()
for _, requirement := range r {
keys.Insert(requirement.Key)
}
return keys.UnsortedList()
}

// Requirements for the provided key, nil if unconstrained
func (r Requirements) Requirement(key string) sets.String {
var result sets.String
// OpIn
for _, requirement := range r {
if requirement.Key == key && requirement.Operator == v1.NodeSelectorOpIn {
if result == nil {
result = sets.NewString(requirement.Values...)
} else {
result = result.Intersection(sets.NewString(requirement.Values...))
}
}
}
// OpNotIn
for _, requirement := range r {
if requirement.Key == key && requirement.Operator == v1.NodeSelectorOpNotIn {
result = result.Difference(sets.NewString(requirement.Values...))
}
}
return result
}
3 changes: 3 additions & 0 deletions pkg/apis/provisioning/v1alpha5/provisioner_validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,9 @@ func (r Requirements) Validate() (errs *apis.FieldError) {
}

func validateRequirement(requirement v1.NodeSelectorRequirement) (errs *apis.FieldError) {
if !WellKnownLabels.Has(requirement.Key) {
errs = errs.Also(apis.ErrInvalidKeyName(fmt.Sprintf("%s not in %v", requirement.Key, WellKnownLabels.UnsortedList()), "key"))
}
for _, err := range validation.IsQualifiedName(requirement.Key) {
errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s, %s", requirement.Key, err), "key"))
}
Expand Down
138 changes: 138 additions & 0 deletions pkg/apis/provisioning/v1alpha5/requirements.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha5

import (
"sort"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)

// Requirements is a decorated alias type for []v1.NodeSelectorRequirement,
// adding set-style resolution and filtering operations.
type Requirements []v1.NodeSelectorRequirement

// Zones returns the resolved value set for v1.LabelTopologyZone, or nil if
// the key is unconstrained.
func (r Requirements) Zones() sets.String {
	return r.Requirement(v1.LabelTopologyZone)
}

// InstanceTypes returns the resolved value set for v1.LabelInstanceTypeStable,
// or nil if the key is unconstrained.
func (r Requirements) InstanceTypes() sets.String {
	return r.Requirement(v1.LabelInstanceTypeStable)
}

// Architectures returns the resolved value set for v1.LabelArchStable, or nil
// if the key is unconstrained.
func (r Requirements) Architectures() sets.String {
	return r.Requirement(v1.LabelArchStable)
}

// CapacityTypes returns the resolved value set for LabelCapacityType, or nil
// if the key is unconstrained.
func (r Requirements) CapacityTypes() sets.String {
	return r.Requirement(LabelCapacityType)
}

// With appends the provided requirements to the receiver and returns the
// combined list. Note: the result may share backing storage with the receiver
// (standard append semantics).
func (r Requirements) With(requirements Requirements) Requirements {
	return append(r, requirements...)
}

// LabelRequirements converts a label map into an equivalent list of
// requirements, one single-value In requirement per key. A nil or empty map
// yields nil.
func LabelRequirements(labels map[string]string) Requirements {
	var requirements Requirements
	for key, value := range labels {
		requirements = append(requirements, v1.NodeSelectorRequirement{
			Key:      key,
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{value},
		})
	}
	return requirements
}

// PodRequirements derives scheduling requirements from the pod's node
// selector and node affinity. NOTE(review): sorting the preferred terms below
// mutates the pod's affinity slice in place — callers should be aware the pod
// object is modified.
func PodRequirements(pod *v1.Pod) (r Requirements) {
	// Node selector entries are equivalent to single-value In requirements.
	for key, value := range pod.Spec.NodeSelector {
		r = append(r, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}})
	}
	// Without node affinity there is nothing further to derive.
	if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil {
		return r
	}
	// Select heaviest preference and treat as a requirement. An outer loop will iteratively unconstrain them if unsatisfiable.
	if preferred := pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution; len(preferred) > 0 {
		sort.Slice(preferred, func(i int, j int) bool { return preferred[i].Weight > preferred[j].Weight })
		r = append(r, preferred[0].Preference.MatchExpressions...)
	}
	// Select first requirement. An outer loop will iteratively remove OR requirements if unsatisfiable
	if pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
		len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 {
		r = append(r, pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...)
	}
	return r
}

// Consolidate combines In and NotIn requirements for each unique key, producing
// an equivalent minimal representation of the requirements. This is useful as
// requirements may be appended from a variety of sources and then consolidated.
// Caution: If a key contains a `NotIn` operator without a corresponding
// `In` operator, the requirement will permanently be [] after consolidation. To
// avoid this, include the broadest `In` requirements before consolidating.
func (r Requirements) Consolidate() (requirements Requirements) {
	for _, key := range r.Keys() {
		// Requirement(key) resolves every In/NotIn entry for the key into a
		// single value set, re-expressed here as one In requirement.
		requirements = append(requirements, v1.NodeSelectorRequirement{
			Key:      key,
			Operator: v1.NodeSelectorOpIn,
			Values:   r.Requirement(key).UnsortedList(),
		})
	}
	return requirements
}

// Custom returns only the requirements whose keys are not in WellKnownLabels,
// preserving their original order.
func (r Requirements) Custom() Requirements {
	var custom Requirements
	for _, requirement := range r {
		if WellKnownLabels.Has(requirement.Key) {
			continue
		}
		custom = append(custom, requirement)
	}
	return custom
}

// WellKnown returns only the requirements whose keys are in WellKnownLabels,
// preserving their original order.
func (r Requirements) WellKnown() Requirements {
	var wellKnown Requirements
	for _, requirement := range r {
		if !WellKnownLabels.Has(requirement.Key) {
			continue
		}
		wellKnown = append(wellKnown, requirement)
	}
	return wellKnown
}

// Keys returns the unique set of label keys appearing in the requirements.
// Order of the result is unspecified.
func (r Requirements) Keys() []string {
	unique := sets.NewString()
	for _, requirement := range r {
		unique.Insert(requirement.Key)
	}
	return unique.UnsortedList()
}

// Requirement resolves the permitted value set for the provided key, or nil
// if the key is unconstrained (no In requirement mentions it). All In sets
// for the key are intersected first; NotIn sets are then subtracted, so the
// outcome does not depend on the order of the requirements list.
func (r Requirements) Requirement(key string) sets.String {
	var result sets.String
	// OpIn: intersect the value sets of every In requirement for the key.
	for _, requirement := range r {
		if requirement.Key == key && requirement.Operator == v1.NodeSelectorOpIn {
			if result == nil {
				result = sets.NewString(requirement.Values...)
			} else {
				result = result.Intersection(sets.NewString(requirement.Values...))
			}
		}
	}
	// OpNotIn: subtract only after all In requirements have been applied.
	// NOTE(review): if only NotIn requirements exist, result is nil here and
	// Difference is called on a nil set — presumably yielding an empty set;
	// confirm against the sets.String implementation.
	for _, requirement := range r {
		if requirement.Key == key && requirement.Operator == v1.NodeSelectorOpNotIn {
			result = result.Difference(sets.NewString(requirement.Values...))
		}
	}
	return result
}
Loading

0 comments on commit 4c1055e

Please sign in to comment.