Commit

Removed custom label generation. Refactored scheduling logic in preparation for multiple provisioner selection
ellistarn committed Nov 11, 2021
1 parent fb06300 commit a095c98
Showing 17 changed files with 361 additions and 390 deletions.
72 changes: 72 additions & 0 deletions pkg/apis/provisioning/v1alpha5/constraints.go
@@ -0,0 +1,72 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha5

import (
"fmt"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)

// Constraints are applied to all nodes created by the provisioner.
type Constraints struct {
// Labels are layered with Requirements and applied to every node.
//+optional
Labels map[string]string `json:"labels,omitempty"`
// Taints will be applied to every node launched by the Provisioner. If
// specified, the provisioner will not provision nodes for pods that do not
// have matching tolerations. Additional taints will be created that match
// pod tolerations on a per-node basis.
// +optional
Taints Taints `json:"taints,omitempty"`
// Requirements are layered with Labels and applied to every node.
Requirements Requirements `json:"requirements,omitempty"`
// Provider contains fields specific to your cloudprovider.
// +kubebuilder:pruning:PreserveUnknownFields
Provider *runtime.RawExtension `json:"provider,omitempty"`
}

// Supports returns nil if the pod's scheduling requirements are compatible with the constraints, or an error describing the incompatibility
func (c *Constraints) Supports(pod *v1.Pod) error {
// Tolerate Taints
if err := c.Taints.Tolerates(pod); err != nil {
return err
}
// The constraints do not support this requirement
podRequirements := PodRequirements(pod)
for _, key := range podRequirements.Keys() {
if c.Requirements.Requirement(key).Len() == 0 {
return fmt.Errorf("%s is too constrained", key)
}
}
// The combined requirements are not compatible
combined := c.Requirements.With(podRequirements)
for _, key := range podRequirements.Keys() {
if combined.Requirement(key).Len() == 0 {
return fmt.Errorf("%s is too constrained", key)
}
}
return nil
}

func (c *Constraints) Tighten(pod *v1.Pod) *Constraints {
return &Constraints{
Labels: c.Labels,
Requirements: c.Requirements.With(PodRequirements(pod)).Consolidate().WellKnown(),
Taints: c.Taints.WithPod(pod),
Provider: c.Provider,
}
}
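
The new Supports/Tighten pair separates feasibility checking from constraint narrowing, which is what the commit message means by preparing for multiple provisioner selection. Below is a minimal sketch of how a selection loop might use it; the scheduling package name, the import path, and the pickConstraints helper are illustrative assumptions rather than code from this commit, and access via Spec.Constraints assumes Constraints is embedded in ProvisionerSpec, as the validation changes further down suggest.

package scheduling

import (
    "fmt"

    v1 "k8s.io/api/core/v1"

    "github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)

// pickConstraints returns the first provisioner whose constraints support the
// pod, tightened to that pod's node selectors, affinities, and tolerations.
// Scoring and error aggregation are omitted; this only illustrates the
// Supports -> Tighten call order.
func pickConstraints(provisioners []v1alpha5.Provisioner, pod *v1.Pod) (*v1alpha5.Constraints, error) {
    for i := range provisioners {
        constraints := &provisioners[i].Spec.Constraints
        if err := constraints.Supports(pod); err != nil {
            continue // this provisioner cannot satisfy the pod's requirements or tolerations
        }
        return constraints.Tighten(pod), nil
    }
    return nil, fmt.Errorf("no provisioner supports pod %s/%s", pod.Namespace, pod.Name)
}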
153 changes: 0 additions & 153 deletions pkg/apis/provisioning/v1alpha5/provisioner.go
@@ -15,12 +15,7 @@ limitations under the License.
package v1alpha5

import (
"sort"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)

// ProvisionerSpec is the top level provisioner specification. Provisioners
@@ -53,27 +48,6 @@ type ProvisionerSpec struct {
TTLSecondsUntilExpired *int64 `json:"ttlSecondsUntilExpired,omitempty"`
}

// Constraints are applied to all nodes created by the provisioner.
type Constraints struct {
// Labels are layered with Requirements and applied to every node.
//+optional
Labels map[string]string `json:"labels,omitempty"`
// Taints will be applied to every node launched by the Provisioner. If
// specified, the provisioner will not provision nodes for pods that do not
// have matching tolerations. Additional taints will be created that match
// pod tolerations on a per-node basis.
// +optional
Taints []v1.Taint `json:"taints,omitempty"`
// Requirements are layered with Labels and applied to every node.
Requirements Requirements `json:"requirements,omitempty"`
// Provider contains fields specific to your cloudprovider.
// +kubebuilder:pruning:PreserveUnknownFields
Provider *runtime.RawExtension `json:"provider,omitempty"`
}

// Requirements is a decorated alias type for []v1.NodeSelectorRequirements
type Requirements []v1.NodeSelectorRequirement

// Provisioner is the Schema for the Provisioners API
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=provisioners,scope=Cluster
@@ -93,130 +67,3 @@ type ProvisionerList struct {
metav1.ListMeta `json:"metadata,omitempty"`
Items []Provisioner `json:"items"`
}

func (r Requirements) Zones() sets.String {
return r.Requirement(v1.LabelTopologyZone)
}

func (r Requirements) InstanceTypes() sets.String {
return r.Requirement(v1.LabelInstanceTypeStable)
}

func (r Requirements) Architectures() sets.String {
return r.Requirement(v1.LabelArchStable)
}

func (r Requirements) OperatingSystems() sets.String {
return r.Requirement(v1.LabelOSStable)
}

func (r Requirements) CapacityTypes() sets.String {
return r.Requirement(LabelCapacityType)
}

func (r Requirements) WithProvisioner(provisioner Provisioner) Requirements {
return r.
With(provisioner.Spec.Requirements).
WithLabels(provisioner.Spec.Labels).
WithLabels(map[string]string{ProvisionerNameLabelKey: provisioner.Name})
}

func (r Requirements) With(requirements Requirements) Requirements {
return append(r, requirements...)
}

func (r Requirements) WithLabels(labels map[string]string) Requirements {
for key, value := range labels {
r = append(r, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}})
}
return r
}

func (r Requirements) WithPod(pod *v1.Pod) Requirements {
for key, value := range pod.Spec.NodeSelector {
r = append(r, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}})
}
if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil {
return r
}
// Select heaviest preference and treat as a requirement. An outer loop will iteratively unconstrain them if unsatisfiable.
if preferred := pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution; len(preferred) > 0 {
sort.Slice(preferred, func(i int, j int) bool { return preferred[i].Weight > preferred[j].Weight })
r = append(r, preferred[0].Preference.MatchExpressions...)
}
// Select first requirement. An outer loop will iteratively remove OR requirements if unsatisfiable
if pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 {
r = append(r, pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...)
}
return r
}

// Consolidate combines In and NotIn requirements for each unique key, producing
// an equivalent minimal representation of the requirements. This is useful as
// requirements may be appended from a variety of sources and then consolidated.
// Caution: If a key contains a `NotIn` operator without a corresponding
// `In` operator, the requirement will permanently be [] after consolidation. To
// avoid this, include the broadest `In` requirements before consolidating.
func (r Requirements) Consolidate() (requirements Requirements) {
for _, key := range r.Keys() {
requirements = append(requirements, v1.NodeSelectorRequirement{
Key: key,
Operator: v1.NodeSelectorOpIn,
Values: r.Requirement(key).UnsortedList(),
})
}
return requirements
}

func (r Requirements) CustomLabels() map[string]string {
labels := map[string]string{}
for _, key := range r.Keys() {
if !WellKnownLabels.Has(key) {
if requirement := r.Requirement(key); len(requirement) > 0 {
labels[key] = requirement.UnsortedList()[0]
}
}
}
return labels
}

func (r Requirements) WellKnown() (requirements Requirements) {
for _, requirement := range r {
if WellKnownLabels.Has(requirement.Key) {
requirements = append(requirements, requirement)
}
}
return requirements
}

// Keys returns unique set of the label keys from the requirements
func (r Requirements) Keys() []string {
keys := sets.NewString()
for _, requirement := range r {
keys.Insert(requirement.Key)
}
return keys.UnsortedList()
}

// Requirements for the provided key, nil if unconstrained
func (r Requirements) Requirement(key string) sets.String {
var result sets.String
// OpIn
for _, requirement := range r {
if requirement.Key == key && requirement.Operator == v1.NodeSelectorOpIn {
if result == nil {
result = sets.NewString(requirement.Values...)
} else {
result = result.Intersection(sets.NewString(requirement.Values...))
}
}
}
// OpNotIn
for _, requirement := range r {
if requirement.Key == key && requirement.Operator == v1.NodeSelectorOpNotIn {
result = result.Difference(sets.NewString(requirement.Values...))
}
}
return result
}
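
The Requirements helpers removed from provisioner.go here are presumably relocated elsewhere in this commit, since the new constraints.go still calls With, Consolidate, WellKnown, and Requirement. Their set semantics are worth keeping in mind: for each key, the values of all In terms are intersected, then all NotIn values are subtracted. A small sketch of that behavior follows, including the caution from the Consolidate doc comment about NotIn without In; the exampleZoneRequirement function and the import path are illustrative assumptions.

package scheduling

import (
    v1 "k8s.io/api/core/v1"

    "github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)

// exampleZoneRequirement shows how layered In/NotIn terms resolve for one key.
func exampleZoneRequirement() {
    reqs := v1alpha5.Requirements{
        {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"us-west-2a", "us-west-2b", "us-west-2c"}},
        {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"us-west-2c"}},
    }
    _ = reqs.Requirement(v1.LabelTopologyZone) // {"us-west-2a", "us-west-2b"}

    // Caution from the Consolidate doc comment: a NotIn term with no matching
    // In term resolves to the empty set, so after Consolidate the key becomes
    // permanently unsatisfiable.
    notInOnly := v1alpha5.Requirements{
        {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"us-west-2c"}},
    }
    _ = notInOnly.Requirement(v1.LabelTopologyZone) // empty set
}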
34 changes: 7 additions & 27 deletions pkg/apis/provisioning/v1alpha5/provisioner_validation.go
@@ -42,12 +42,6 @@ func (s *ProvisionerSpec) validate(ctx context.Context) (errs *apis.FieldError)
return errs.Also(
s.validateTTLSecondsUntilExpired(),
s.validateTTLSecondsAfterEmpty(),
// This validation is on the ProvisionerSpec despite the fact that
// labels are a property of Constraints. This is necessary because
// validation is applied to constraints that include pod overrides.
// These labels are restricted when creating provisioners, but are not
// restricted for pods since they're necessary to override constraints.
s.validateRestrictedLabels(),
s.Constraints.Validate(ctx),
)
}
@@ -65,17 +59,6 @@ func (s *ProvisionerSpec) validateTTLSecondsAfterEmpty() (errs *apis.FieldError)
return errs
}

func (s *ProvisionerSpec) validateRestrictedLabels() (errs *apis.FieldError) {
for key := range s.Labels {
for _, restricted := range RestrictedLabels {
if strings.HasPrefix(key, restricted) {
errs = errs.Also(apis.ErrInvalidKeyName(key, "labels"))
}
}
}
return errs
}

// Validate the constraints
func (c *Constraints) Validate(ctx context.Context) (errs *apis.FieldError) {
return errs.Also(
@@ -94,6 +77,9 @@ func (c *Constraints) validateLabels() (errs *apis.FieldError) {
for _, err := range validation.IsValidLabelValue(value) {
errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s, %s", value, err), fmt.Sprintf("labels[%s]", key)))
}
if RestrictedLabels.Has(key) {
errs = errs.Also(apis.ErrInvalidKeyName(key, "labels", "label is restricted"))
}
if _, ok := WellKnownLabels[key]; !ok && IsRestrictedLabelDomain(key) {
errs = errs.Also(apis.ErrInvalidKeyName(key, "labels", "label domain not allowed"))
}
@@ -103,7 +89,7 @@

func IsRestrictedLabelDomain(key string) bool {
labelDomain := getLabelDomain(key)
for _, restrictedLabelDomain := range RestrictedLabelDomains {
for restrictedLabelDomain := range RestrictedLabelDomains {
if strings.HasSuffix(labelDomain, restrictedLabelDomain) {
return true
}
@@ -152,16 +138,10 @@ func (c *Constraints) validateRequirements() (errs *apis.FieldError) {
return errs
}

func (r Requirements) Validate() (errs *apis.FieldError) {
for _, label := range r.Keys() {
if r.Requirement(label).Len() == 0 {
errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("%s is too constrained", label)))
}
}
return errs
}

func validateRequirement(requirement v1.NodeSelectorRequirement) (errs *apis.FieldError) {
if !WellKnownLabels.Has(requirement.Key) {
errs = errs.Also(apis.ErrInvalidKeyName(fmt.Sprintf("%s not in %v", requirement.Key, WellKnownLabels.UnsortedList()), "key"))
}
for _, err := range validation.IsQualifiedName(requirement.Key) {
errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s, %s", requirement.Key, err), "key"))
}
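
With the ProvisionerSpec-level check removed, restricted-label enforcement now lives entirely in Constraints.validateLabels, combining the new RestrictedLabels set with the existing restricted-domain suffix check. A hedged sketch of what is now rejected follows; the exampleRestrictedLabel function and import paths are assumptions, including that the apis package here is knative.dev/pkg/apis.

package scheduling

import (
    "context"

    "knative.dev/pkg/apis"

    "github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)

// exampleRestrictedLabel exercises the relocated validation: a label key in
// RestrictedLabels (such as kubernetes.io/hostname), or one under a restricted
// domain that is not a well-known label, now fails Constraints.Validate directly.
func exampleRestrictedLabel(ctx context.Context) *apis.FieldError {
    constraints := &v1alpha5.Constraints{
        Labels: map[string]string{"kubernetes.io/hostname": "ip-10-0-0-1"},
    }
    return constraints.Validate(ctx) // non-nil: "label is restricted"
}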
9 changes: 5 additions & 4 deletions pkg/apis/provisioning/v1alpha5/register.go
@@ -41,25 +41,26 @@

var (
// RestrictedLabels are injected by Cloud Providers
RestrictedLabels = []string{
RestrictedLabels = sets.NewString(
// Used internally by provisioning logic
EmptinessTimestampAnnotationKey,
v1.LabelHostname,
}
)
// These are either prohibited by the kubelet or reserved by karpenter
KarpenterLabelDomain = "karpenter.sh"
RestrictedLabelDomains = []string{
RestrictedLabelDomains = sets.NewString(
"kubernetes.io",
"k8s.io",
KarpenterLabelDomain,
}
)
LabelCapacityType = KarpenterLabelDomain + "/capacity-type"
// WellKnownLabels supported by karpenter
WellKnownLabels = sets.NewString(
v1.LabelTopologyZone,
v1.LabelInstanceTypeStable,
v1.LabelArchStable,
LabelCapacityType,
v1.LabelHostname, // Used internally for hostname topology spread
)
DefaultHook = func(ctx context.Context, constraints *Constraints) {}
ValidateHook = func(ctx context.Context, constraints *Constraints) *apis.FieldError { return nil }
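
RestrictedLabelDomains becomes a set here, but IsRestrictedLabelDomain (shown in provisioner_validation.go above) still matches by suffix, so subdomains are covered as well. The sketch below illustrates the intended outcomes; it assumes getLabelDomain extracts the portion of the key before the slash, and the exampleDomainChecks function and import path are illustrative.

package scheduling

import "github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"

// exampleDomainChecks illustrates the suffix matching in IsRestrictedLabelDomain:
// any label domain ending in kubernetes.io, k8s.io, or karpenter.sh is restricted,
// including subdomains, unless the full key is in WellKnownLabels.
func exampleDomainChecks() []bool {
    return []bool{
        v1alpha5.IsRestrictedLabelDomain("node.kubernetes.io/custom"), // true: subdomain of kubernetes.io
        v1alpha5.IsRestrictedLabelDomain("karpenter.sh/team"),         // true: karpenter.sh is reserved
        v1alpha5.IsRestrictedLabelDomain("example.com/team"),          // false: custom domains are allowed
    }
}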