Skip to content

Commit

Permalink
merge bin-packing and scheduling
Browse files Browse the repository at this point in the history
This solves some issues with topologies and prepares for pod-affinity in
that a separate bin-packing pass would break apart pods that the scheduler
intended to be packed together.  By combining bin-packing with scheduling
the 'scheduling nodes' created correspond to actual K8s nodes that will be
created via the cloud provider, eliminating any errors in counting
hostname topologies.
  • Loading branch information
tzneal committed Mar 25, 2022
1 parent 74c2601 commit 705ae2a
Show file tree
Hide file tree
Showing 18 changed files with 413 additions and 560 deletions.
11 changes: 0 additions & 11 deletions pkg/apis/provisioning/v1alpha5/constraints.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,17 +66,6 @@ func (c *Constraints) ValidatePod(pod *v1.Pod) error {
return nil
}

// Tighten returns a new Constraints whose requirements are the receiver's
// requirements narrowed by the scheduling requirements derived from the pod.
// All other fields are carried over unchanged; the receiver is not mutated.
func (c *Constraints) Tighten(pod *v1.Pod) *Constraints {
	// Combine the provisioner-level requirements with the pod-derived ones.
	combined := c.Requirements.Add(NewPodRequirements(pod).Requirements...)
	return &Constraints{
		Labels:               c.Labels,
		Requirements:         combined,
		Taints:               c.Taints,
		Provider:             c.Provider,
		KubeletConfiguration: c.KubeletConfiguration,
	}
}

func (c *Constraints) ToNode() *v1.Node {
labels := map[string]string{}
for key, value := range c.Labels {
Expand Down
28 changes: 0 additions & 28 deletions pkg/apis/provisioning/v1alpha5/taints.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,34 +24,6 @@ import (
// Taints is a decorated alias type for []v1.Taint
type Taints []v1.Taint

// WithPod returns the taint list extended with taints generated from the
// pod's tolerations. Only tolerations using OpEqual are considered; a
// toleration with an empty effect yields both NoSchedule and NoExecute
// taints. Taints already present are not duplicated.
func (ts Taints) WithPod(pod *v1.Pod) Taints {
	for _, toleration := range pod.Spec.Tolerations {
		// Only OpEqual is supported. OpExists does not make sense for
		// provisioning -- in theory we could create a taint on the node with a
		// random string, but it's unclear what use case this would accomplish.
		if toleration.Operator != v1.TolerationOpEqual {
			continue
		}
		// Use the toleration's effect if defined, otherwise taint all effects.
		effects := []v1.TaintEffect{toleration.Effect}
		if toleration.Effect == "" {
			effects = []v1.TaintEffect{v1.TaintEffectNoSchedule, v1.TaintEffectNoExecute}
		}
		for _, effect := range effects {
			candidate := v1.Taint{Key: toleration.Key, Value: toleration.Value, Effect: effect}
			// Only add taints that do not already exist on constraints.
			if !ts.Has(candidate) {
				ts = append(ts, candidate)
			}
		}
	}
	return ts
}

// Has returns true if taints has a taint for the given key and effect
func (ts Taints) Has(taint v1.Taint) bool {
for _, t := range ts {
Expand Down
2 changes: 2 additions & 0 deletions pkg/cloudprovider/aws/cloudprovider.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ const (
CacheTTL = 60 * time.Second
// CacheCleanupInterval triggers cache cleanup (lazy eviction) at this interval.
CacheCleanupInterval = 10 * time.Minute
// MaxInstanceTypes defines the number of instance type options to pass to CreateFleet
MaxInstanceTypes = 20
)

func init() {
Expand Down
5 changes: 4 additions & 1 deletion pkg/cloudprovider/aws/instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,11 @@ func NewInstanceProvider(ec2api ec2iface.EC2API, instanceTypeProvider *InstanceT
// If spot is not used, the instanceTypes are not required to be sorted
// because we are using ec2 fleet's lowest-price OD allocation strategy
func (p *InstanceProvider) Create(ctx context.Context, constraints *v1alpha1.Constraints, instanceTypes []cloudprovider.InstanceType) (*v1.Node, error) {
// Launch Instance
instanceTypes = p.filterInstanceTypes(instanceTypes)
if len(instanceTypes) > MaxInstanceTypes {
instanceTypes = instanceTypes[0:MaxInstanceTypes]
}

id, err := p.launchInstance(ctx, constraints, instanceTypes)
if err != nil {
return nil, err
Expand Down
56 changes: 28 additions & 28 deletions pkg/cloudprovider/aws/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -714,35 +714,35 @@ var _ = Describe("Allocation", func() {
})
})
Context("Ephemeral Storage", func() {
It("should pack pods when a daemonset has an ephemeral-storage request", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("1Gi")}},
}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner, test.UnschedulablePod())
ExpectScheduled(ctx, env.Client, pod[0])
})
It("should pack pods with any ephemeral-storage request", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner,
test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceEphemeralStorage: resource.MustParse("1G"),
}}}))
ExpectScheduled(ctx, env.Client, pod[0])
})
It("should pack pods with large ephemeral-storage request", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner,
test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceEphemeralStorage: resource.MustParse("1Pi"),
}}}))
ExpectScheduled(ctx, env.Client, pod[0])
})
It("should pack pods when a daemonset has an ephemeral-storage request", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("1Gi")}},
}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner, test.UnschedulablePod())
ExpectScheduled(ctx, env.Client, pod[0])
})
It("should pack pods with any ephemeral-storage request", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner,
test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceEphemeralStorage: resource.MustParse("1G"),
}}}))
ExpectScheduled(ctx, env.Client, pod[0])
})
It("should pack pods with large ephemeral-storage request", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner,
test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceEphemeralStorage: resource.MustParse("1Pi"),
}}}))
ExpectScheduled(ctx, env.Client, pod[0])
})
})
})
Context("Defaulting", func() {
// Intent here is that if updates occur on the controller, the Provisioner doesn't need to be recreated
Expand Down
142 changes: 0 additions & 142 deletions pkg/controllers/provisioning/binpacking/packable.go

This file was deleted.

Loading

0 comments on commit 705ae2a

Please sign in to comment.