diff --git a/go.mod b/go.mod index 0abd893..ac85330 100644 --- a/go.mod +++ b/go.mod @@ -9,9 +9,9 @@ godebug default=go1.23 require ( github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0-20241105230147-1ddf895d7e74 - k8s.io/apimachinery v0.0.0-20241105225905-b5e810677b4f - k8s.io/client-go v0.0.0-20241105230542-c1010ffd7de3 + k8s.io/api v0.0.0-20241127162655-f8e5e36c84f1 + k8s.io/apimachinery v0.0.0-20241108022104-96b97de8d6ba + k8s.io/client-go v0.0.0-20241108115833-955401ca9a15 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) @@ -53,3 +53,5 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) + +replace k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20240920213627-16af2ff33fbf diff --git a/go.sum b/go.sum index 74b1f01..c65602c 100644 --- a/go.sum +++ b/go.sum @@ -134,12 +134,12 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.0.0-20241105230147-1ddf895d7e74 h1:omoqr99s5DbyApsEajh0iIyKzGL/5vkvEgHn0P/q/NQ= -k8s.io/api v0.0.0-20241105230147-1ddf895d7e74/go.mod h1:QMjNGKwUJOiB0TWCMJWLvhiVAvOrl9I+MTeV0dr56NE= -k8s.io/apimachinery v0.0.0-20241105225905-b5e810677b4f h1:MTmedS366tu07Nh6HBoXS90/6DA5gP62gMyYTF+lT+Q= -k8s.io/apimachinery v0.0.0-20241105225905-b5e810677b4f/go.mod h1:HqhdaJUgQqky29T1V0o2yFkt/pZqLFIDyn9Zi/8rxoY= -k8s.io/client-go v0.0.0-20241105230542-c1010ffd7de3 h1:ZgoctdRlDLgbqPdEdcgyoyBbVGgNmfcokEkFYjpHZ68= -k8s.io/client-go v0.0.0-20241105230542-c1010ffd7de3/go.mod h1:p8opQH8f5aM1YzHrN0yWNcD4qqfAJIaR0+kVTU/TIBw= +k8s.io/api v0.0.0-20241127162655-f8e5e36c84f1 h1:MTqd8524+MzN0Kxt42qAvh/aUYC18yz1BJUmfWADaDg= +k8s.io/api 
v0.0.0-20241127162655-f8e5e36c84f1/go.mod h1:qs155+gTdM43TXy/cV8a8yOjDeNR8kGJc82AraJrh/c= +k8s.io/apimachinery v0.0.0-20240920213627-16af2ff33fbf h1:ZRwu8YHh3bFbQU4NRvHB6fiovWLBouxY86wIcLd7sBA= +k8s.io/apimachinery v0.0.0-20240920213627-16af2ff33fbf/go.mod h1:HqhdaJUgQqky29T1V0o2yFkt/pZqLFIDyn9Zi/8rxoY= +k8s.io/client-go v0.0.0-20241108115833-955401ca9a15 h1:0OnDjUEXj3mhxJUiBBRGOLrXQ8aj5zvidfDlI2SCoew= +k8s.io/client-go v0.0.0-20241108115833-955401ca9a15/go.mod h1:DojZKPG2ohOKreFao9yo2wFG1IL9OXebT+Q9ytnREY8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= diff --git a/resource/helpers.go b/resource/helpers.go index a8b252c..3bdcef6 100644 --- a/resource/helpers.go +++ b/resource/helpers.go @@ -18,6 +18,7 @@ package resource import ( v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" ) // ContainerType signifies container type @@ -46,15 +47,101 @@ type PodResourcesOptions struct { // NonMissingContainerRequests if provided will replace any missing container level requests for the specified resources // with the given values. If the requests for those resources are explicitly set, even if zero, they will not be modified. NonMissingContainerRequests v1.ResourceList + // SkipPodLevelResources controls whether pod-level resources should be skipped + // from the calculation. If pod-level resources are not set in PodSpec, + // pod-level resources will always be skipped. + SkipPodLevelResources bool } -// PodRequests computes the pod requests per the PodResourcesOptions supplied. If PodResourcesOptions is nil, then -// the requests are returned including pod overhead. The computation is part of the API and must be reviewed -// as an API change. 
+var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory) + +func SupportedPodLevelResources() sets.Set[v1.ResourceName] { + return supportedPodLevelResources +} + +// IsSupportedPodLevelResource checks if a given resource is supported by pod-level +// resource management through the PodLevelResources feature. Returns true if +// the resource is supported. +func IsSupportedPodLevelResource(name v1.ResourceName) bool { + return supportedPodLevelResources.Has(name) +} + +// IsPodLevelResourcesSet checks if PodLevelResources pod-level resources are set. +// It returns true if either the Requests or Limits maps are non-empty. +func IsPodLevelResourcesSet(pod *v1.Pod) bool { + if pod.Spec.Resources == nil { + return false + } + + if (len(pod.Spec.Resources.Requests) + len(pod.Spec.Resources.Limits)) == 0 { + return false + } + + for resourceName := range pod.Spec.Resources.Requests { + if IsSupportedPodLevelResource(resourceName) { + return true + } + } + + for resourceName := range pod.Spec.Resources.Limits { + if IsSupportedPodLevelResource(resourceName) { + return true + } + } + + return false +} + +// IsPodLevelRequestsSet checks if pod-level requests are set. It returns true if +// Requests map is non-empty. +func IsPodLevelRequestsSet(pod *v1.Pod) bool { + if pod.Spec.Resources == nil { + return false + } + + if len(pod.Spec.Resources.Requests) == 0 { + return false + } + + for resourceName := range pod.Spec.Resources.Requests { + if IsSupportedPodLevelResource(resourceName) { + return true + } + } + + return false +} + +// PodRequests computes the total pod requests per the PodResourcesOptions supplied. +// If PodResourcesOptions is nil, then the requests are returned including pod overhead. +// If the PodLevelResources feature is enabled AND the pod-level resources are set, +// those pod-level values are used in calculating Pod Requests. +// The computation is part of the API and must be reviewed as an API change. 
func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { + reqs := AggregateContainerRequests(pod, opts) + if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) { + for resourceName, quantity := range pod.Spec.Resources.Requests { + if IsSupportedPodLevelResource(resourceName) { + reqs[resourceName] = quantity + } + } + } + + // Add overhead for running a pod to the sum of requests if requested: + if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { + addResourceList(reqs, pod.Spec.Overhead) + } + + return reqs +} + +// AggregateContainerRequests computes the total resource requests of all the containers +// in a pod. This computation follows the formula defined in the KEP for sidecar +// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission +// for more details. +func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { // attempt to reuse the maps if passed, or allocate otherwise reqs := reuseOrClearResourceList(opts.Reuse) - var containerStatuses map[string]*v1.ContainerStatus if opts.UseStatusResources { containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) @@ -124,12 +211,6 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { } maxResourceList(reqs, initContainerReqs) - - // Add overhead for running a pod to the sum of requests if requested: - if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { - addResourceList(reqs, pod.Spec.Overhead) - } - return reqs } @@ -155,8 +236,35 @@ func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.Resour // as an API change. 
func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { // attempt to reuse the maps if passed, or allocate otherwise - limits := reuseOrClearResourceList(opts.Reuse) + limits := AggregateContainerLimits(pod, opts) + if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) { + for resourceName, quantity := range pod.Spec.Resources.Limits { + if IsSupportedPodLevelResource(resourceName) { + limits[resourceName] = quantity + } + } + } + + // Add overhead to non-zero limits if requested: + if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { + for name, quantity := range pod.Spec.Overhead { + if value, ok := limits[name]; ok && !value.IsZero() { + value.Add(quantity) + limits[name] = value + } + } + } + + return limits +} +// AggregateContainerLimits computes the aggregated resource limits of all the containers +// in a pod. This computation follows the formula defined in the KEP for sidecar +// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission +// for more details. 
+func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { + // attempt to reuse the maps if passed, or allocate otherwise + limits := reuseOrClearResourceList(opts.Reuse) var containerStatuses map[string]*v1.ContainerStatus if opts.UseStatusResources { containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) @@ -216,17 +324,6 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { } maxResourceList(limits, initContainerLimits) - - // Add overhead to non-zero limits if requested: - if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { - for name, quantity := range pod.Spec.Overhead { - if value, ok := limits[name]; ok && !value.IsZero() { - value.Add(quantity) - limits[name] = value - } - } - } - return limits } diff --git a/resource/helpers_test.go b/resource/helpers_test.go index f2d984c..e146a31 100644 --- a/resource/helpers_test.go +++ b/resource/helpers_test.go @@ -1228,10 +1228,529 @@ func TestPodResourceLimits(t *testing.T) { } } +func TestIsPodLevelResourcesSet(t *testing.T) { + testCases := []struct { + name string + podResources *v1.ResourceRequirements + expected bool + }{ + { + name: "nil resources struct", + expected: false, + }, + { + name: "empty resources struct", + podResources: &v1.ResourceRequirements{}, + expected: false, + }, + { + name: "only unsupported resource requests set", + podResources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("1Mi")}, + }, + expected: false, + }, + { + name: "only unsupported resource limits set", + podResources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("1Mi")}, + }, + expected: false, + }, + { + name: "unsupported and suported resources requests set", + podResources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceEphemeralStorage: resource.MustParse("1Mi"), + v1.ResourceCPU: 
resource.MustParse("1m"), + }, + }, + expected: true, + }, + { + name: "unsupported and suported resources limits set", + podResources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceEphemeralStorage: resource.MustParse("1Mi"), + v1.ResourceCPU: resource.MustParse("1m"), + }, + }, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testPod := &v1.Pod{Spec: v1.PodSpec{Resources: tc.podResources}} + if got := IsPodLevelResourcesSet(testPod); got != tc.expected { + t.Errorf("got=%t, want=%t", got, tc.expected) + } + }) + } + +} + +func TestPodLevelResourceRequests(t *testing.T) { + restartAlways := v1.ContainerRestartPolicyAlways + testCases := []struct { + name string + opts PodResourcesOptions + podResources v1.ResourceRequirements + overhead v1.ResourceList + initContainers []v1.Container + containers []v1.Container + expectedRequests v1.ResourceList + }{ + { + name: "nil", + expectedRequests: v1.ResourceList{}, + }, + { + name: "pod level memory resource with SkipPodLevelResources true", + podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}}, + opts: PodResourcesOptions{SkipPodLevelResources: true}, + expectedRequests: v1.ResourceList{}, + }, + { + name: "pod level memory resource with SkipPodLevelResources false", + podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}}, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}, + }, + { + name: "pod level memory and container level cpu resources with SkipPodLevelResources false", + podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}}, + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2m")}}, + }, + }, + opts: 
PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi"), v1.ResourceCPU: resource.MustParse("2m")}, + }, + { + name: "pod level unsupported resources set at both pod-level and container-level with SkipPodLevelResources false", + podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Mi")}}, + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("3Mi")}}, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("3Mi")}, + }, + { + name: "pod level unsupported resources set at pod-level with SkipPodLevelResources false", + podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Mi")}}, + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi")}}, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi")}, + }, + { + name: "only container level resources set with SkipPodLevelResources false", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m")}, + }, + { + name: "both container-level and pod-level resources set with SkipPodLevelResources false", + podResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("6Mi"), + v1.ResourceCPU: resource.MustParse("8m"), + }, + }, + 
containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("6Mi"), v1.ResourceCPU: resource.MustParse("8m")}, + }, + { + name: "container-level resources and init container set with SkipPodLevelResources false", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("5Mi"), + v1.ResourceCPU: resource.MustParse("4m"), + }, + }, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m")}, + }, + { + name: "container-level resources and init container set with SkipPodLevelResources false", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("5Mi"), + v1.ResourceCPU: resource.MustParse("4m"), + }, + }, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: true}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m")}, + }, + { + name: "container-level resources and sidecar container set with SkipPodLevelResources false", + containers: []v1.Container{ + { + Resources: 
v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("5Mi"), + v1.ResourceCPU: resource.MustParse("4m"), + }, + }, + RestartPolicy: &restartAlways, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("8Mi"), v1.ResourceCPU: resource.MustParse("6m")}, + }, + { + name: "container-level resources, init and sidecar container set with SkipPodLevelResources false", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("5Mi"), + v1.ResourceCPU: resource.MustParse("4m"), + }, + }, + RestartPolicy: &restartAlways, + }, + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("6Mi"), + v1.ResourceCPU: resource.MustParse("8m"), + }, + }, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("11Mi"), v1.ResourceCPU: resource.MustParse("12m")}, + }, + { + name: "pod-level resources, container-level resources, init and sidecar container set with SkipPodLevelResources false", + podResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("15Mi"), + v1.ResourceCPU: resource.MustParse("18m"), + }, + }, + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("3Mi"), + 
v1.ResourceCPU: resource.MustParse("2m"), + }, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("5Mi"), + v1.ResourceCPU: resource.MustParse("4m"), + }, + }, + RestartPolicy: &restartAlways, + }, + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("6Mi"), + v1.ResourceCPU: resource.MustParse("8m"), + }, + }, + }, + }, + opts: PodResourcesOptions{SkipPodLevelResources: false}, + expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("15Mi"), v1.ResourceCPU: resource.MustParse("18m")}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + podReqs := PodRequests(getPodLevelResourcesPod(tc.podResources, tc.overhead, tc.containers, tc.initContainers), tc.opts) + if diff := cmp.Diff(podReqs, tc.expectedRequests); diff != "" { + t.Errorf("got=%v, want=%v, diff=%s", podReqs, tc.expectedRequests, diff) + } + }) + } +} + +func TestAggregateContainerRequestsAndLimits(t *testing.T) { + restartAlways := v1.ContainerRestartPolicyAlways + cases := []struct { + containers []v1.Container + initContainers []v1.Container + name string + expectedRequests v1.ResourceList + expectedLimits v1.ResourceList + }{ + { + name: "one container with limits", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + expectedRequests: v1.ResourceList{}, + expectedLimits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), + }, + }, + { + name: "two containers with limits", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): 
resource.MustParse("9")}, + }, + }, + }, + expectedRequests: v1.ResourceList{}, + expectedLimits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("18"), + }, + }, + { + name: "one container with requests", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + expectedRequests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), + }, + expectedLimits: v1.ResourceList{}, + }, + { + name: "two containers with requests", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + expectedRequests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("18"), + }, + expectedLimits: v1.ResourceList{}, + }, + { + name: "regular and init containers with requests", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + expectedRequests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), + }, + expectedLimits: v1.ResourceList{}, + }, + { + name: "regular, init and sidecar containers with requests", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): 
resource.MustParse("8")}, + }, + RestartPolicy: &restartAlways, + }, + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("6")}, + }, + }, + }, + expectedRequests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("17"), + }, + expectedLimits: v1.ResourceList{}, + }, + { + name: "regular and init containers with limits", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + expectedRequests: v1.ResourceList{}, + expectedLimits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), + }, + }, + { + name: "regular, init and sidecar containers with limits", + containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, + }, + }, + }, + initContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("8")}, + }, + RestartPolicy: &restartAlways, + }, + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("6")}, + }, + }, + }, + expectedRequests: v1.ResourceList{}, + expectedLimits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("17"), + }, + }, + } + + for idx, tc := range cases { + testPod := &v1.Pod{Spec: v1.PodSpec{Containers: tc.containers, InitContainers: tc.initContainers}} + resRequests := AggregateContainerRequests(testPod, PodResourcesOptions{}) + resLimits := AggregateContainerLimits(testPod, PodResourcesOptions{}) + + if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) { + 
t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedRequests, resRequests) + } + + if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) { + t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedLimits, resLimits) + } + } +} + type podResources struct { cpuRequest, cpuLimit, memoryRequest, memoryLimit, cpuOverhead, memoryOverhead string } +func getPodLevelResourcesPod(podResources v1.ResourceRequirements, overhead v1.ResourceList, containers, initContainers []v1.Container) *v1.Pod { + return &v1.Pod{ + Spec: v1.PodSpec{ + Resources: &podResources, + Containers: containers, + InitContainers: initContainers, + Overhead: overhead, + }, + } +} + +// TODO(ndixita): refactor to re-use getPodResourcesPod() func getPod(cname string, resources podResources) *v1.Pod { r := v1.ResourceRequirements{ Limits: make(v1.ResourceList),