Fix missing requests when only limits are supplied
dewjam committed Apr 5, 2022
1 parent dac3c4c commit c10f773
Showing 4 changed files with 151 additions and 6 deletions.
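Context: before this change, RequestsForPods and LimitsForPods summed each container's Resources.Requests and Resources.Limits directly, so a container that declared only limits contributed nothing to the pod's computed requests, and initContainers were ignored entirely. The new Ceiling helper below falls back to a container's limits when no request is set (mirroring Kubernetes' own defaulting) and takes an element-wise max against initContainers. A minimal sketch of that fallback behavior, using an illustrative helper name (effectiveRequests is not part of the commit):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// effectiveRequests is an illustrative helper (not part of this commit): for
// each resource, fall back to the limit when no request is set, which is the
// per-container defaulting the fixed code now applies.
func effectiveRequests(r v1.ResourceRequirements) v1.ResourceList {
	out := v1.ResourceList{}
	for name, q := range r.Requests {
		out[name] = q
	}
	for name, q := range r.Limits {
		if _, ok := out[name]; !ok {
			out[name] = q
		}
	}
	return out
}

func main() {
	// A container that declares only limits: previously it contributed zero
	// to the pod's computed requests during binpacking.
	limitsOnly := v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("2"),
			v1.ResourceMemory: resource.MustParse("4Gi"),
		},
	}
	reqs := effectiveRequests(limitsOnly)
	cpu := reqs[v1.ResourceCPU]
	mem := reqs[v1.ResourceMemory]
	fmt.Println(cpu.String(), mem.String()) // 2 4Gi
}
```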
35 changes: 35 additions & 0 deletions pkg/controllers/provisioning/scheduling/suite_test.go
@@ -2305,6 +2305,41 @@ var _ = Describe("Binpacking", func() {
}
Expect(nodeNames).To(HaveLen(5))
})
It("should take into account initContainer resource requests when binpacking", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner, test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceCPU: resource.MustParse("1"),
},
},
InitResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceCPU: resource.MustParse("2"),
},
},
}))[0]
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("default-instance-type"))
})
It("should not schedule pods when initContainer resource requests are greater than available instance types", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioners, provisioner, test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceCPU: resource.MustParse("1"),
},
},
InitResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Ti"),
v1.ResourceCPU: resource.MustParse("2"),
},
},
}))[0]
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should select for valid instance types, regardless of price", func() {
// capacity sizes and prices don't correlate here, regardless we should filter and see that all three instance types
// are valid before preferring the cheapest one 'large'
64 changes: 64 additions & 0 deletions pkg/controllers/provisioning/suite_test.go
@@ -180,6 +180,70 @@ var _ = Describe("Provisioning", func() {
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioningController, provisioner, test.UnschedulablePod(test.PodOptions{}))[0]
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule if resource requests are not defined and limits (requests) are too large", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioningController, provisioner, test.UnschedulablePod(test.PodOptions{}))[0]
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule based on the max resource requests of containers and initContainers", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
InitResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioningController, provisioner, test.UnschedulablePod(test.PodOptions{}))[0]
node := ExpectScheduled(ctx, env.Client, pod)
Expect(*node.Status.Allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*node.Status.Allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should not schedule if combined max resources are too large for any node", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
InitResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioningController, provisioner, test.UnschedulablePod(test.PodOptions{}))[0]
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule if initContainer resources are too large", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
InitResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
},
}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioningController, provisioner, test.UnschedulablePod(test.PodOptions{}))[0]
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should be able to schedule pods if resource requests and limits are not defined", func() {
ExpectCreated(ctx, env.Client, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{}},
))
pod := ExpectProvisioned(ctx, env.Client, selectionController, provisioningController, provisioner, test.UnschedulablePod(test.PodOptions{}))[0]
ExpectScheduled(ctx, env.Client, pod)
})
It("should ignore daemonsets without matching tolerations", func() {
provisioner.Spec.Taints = v1alpha5.Taints{{Key: "foo", Value: "bar", Effect: v1.TaintEffectNoSchedule}}
ExpectCreated(ctx, env.Client, test.DaemonSet(
6 changes: 6 additions & 0 deletions pkg/test/pods.go
@@ -35,6 +35,7 @@ type PodOptions struct {
Image string
NodeName string
PriorityClassName string
InitResourceRequirements v1.ResourceRequirements
ResourceRequirements v1.ResourceRequirements
NodeSelector map[string]string
NodeRequirements []v1.NodeSelectorRequirement
@@ -90,6 +91,11 @@ func Pod(overrides ...PodOptions) *v1.Pod {
Affinity: buildAffinity(options),
TopologySpreadConstraints: options.TopologySpreadConstraints,
Tolerations: options.Tolerations,
InitContainers: []v1.Container{{
Name: strings.ToLower(sequentialRandomName()),
Image: options.Image,
Resources: options.InitResourceRequirements,
}},
Containers: []v1.Container{{
Name: strings.ToLower(sequentialRandomName()),
Image: options.Image,
52 changes: 46 additions & 6 deletions pkg/utils/resources/resources.go
@@ -25,9 +25,7 @@ import (
func RequestsForPods(pods ...*v1.Pod) v1.ResourceList {
var resources []v1.ResourceList
for _, pod := range pods {
-	for _, container := range pod.Spec.Containers {
-		resources = append(resources, container.Resources.Requests)
-	}
+	resources = append(resources, Ceiling(pod).Requests)
}
merged := Merge(resources...)
merged[v1.ResourcePods] = *resource.NewQuantity(int64(len(pods)), resource.DecimalExponent)
@@ -38,9 +36,7 @@ func RequestsForPods(pods ...*v1.Pod) v1.ResourceList {
func LimitsForPods(pods ...*v1.Pod) v1.ResourceList {
var resources []v1.ResourceList
for _, pod := range pods {
-	for _, container := range pod.Spec.Containers {
-		resources = append(resources, container.Resources.Limits)
-	}
+	resources = append(resources, Ceiling(pod).Limits)
}
merged := Merge(resources...)
merged[v1.ResourcePods] = *resource.NewQuantity(int64(len(pods)), resource.DecimalExponent)
@@ -63,6 +59,50 @@ func Merge(resources ...v1.ResourceList) v1.ResourceList {
return result
}

// Ceiling calculates the max between the sum of container resources and max of initContainers
func Ceiling(pod *v1.Pod) v1.ResourceRequirements {
var resources v1.ResourceRequirements
for _, container := range pod.Spec.Containers {
resources.Requests = Merge(resources.Requests, MergeResourceLimitsIntoRequests(container))
resources.Limits = Merge(resources.Limits, container.Resources.Limits)
}
for _, container := range pod.Spec.InitContainers {
resources.Requests = MaxResources(resources.Requests, MergeResourceLimitsIntoRequests(container))
resources.Limits = MaxResources(resources.Limits, container.Resources.Limits)
}
return resources
}

// MaxResources returns the maximum quantities for a given list of resources
func MaxResources(resources ...v1.ResourceList) v1.ResourceList {
resourceList := v1.ResourceList{}
for _, resource := range resources {
for resourceName, quantity := range resource {
if value, ok := resourceList[resourceName]; !ok || quantity.Cmp(value) > 0 {
resourceList[resourceName] = quantity
}
}
}
return resourceList
}

// MergeResourceLimitsIntoRequests merges resource limits into requests if no request exists for the given resource
func MergeResourceLimitsIntoRequests(container v1.Container) v1.ResourceList {
resources := container.Resources.DeepCopy()
if resources.Requests == nil {
resources.Requests = v1.ResourceList{}
}

if resources.Limits != nil {
for resourceName, quantity := range resources.Limits {
if _, ok := resources.Requests[resourceName]; !ok {
resources.Requests[resourceName] = quantity
}
}
}
return resources.Requests
}

// Quantity parses the string value into a *Quantity
func Quantity(value string) *resource.Quantity {
r := resource.MustParse(value)
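For reference, a hedged usage sketch of the new helpers: Ceiling sums the limit-defaulted requests of the regular containers, then takes an element-wise max against each initContainer, since init containers run one at a time before the main containers start. The import path below is assumed from the repository layout at the time of this commit, and the quantities are illustrative:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	// Import path assumed from the repository layout at the time of this commit.
	"github.com/aws/karpenter/pkg/utils/resources"
)

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		InitContainers: []v1.Container{{
			// Only a limit is set; with this fix the limit also counts as the request.
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")},
			},
		}},
		Containers: []v1.Container{
			{Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
			}},
			{Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
			}},
		},
	}}

	// Regular containers sum to 3 CPU; the init container alone needs 4 CPU,
	// so the pod's effective request is max(3, 4) = 4.
	cpu := resources.Ceiling(pod).Requests[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 4
}
```

Under these assumptions, a caller of RequestsForPods now sees 4 CPU for this pod instead of 3, which is the behavior the new scheduling and provisioning tests above exercise.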
