From ae4fa7123c88e92952261976ded6f6ad747dd000 Mon Sep 17 00:00:00 2001 From: Ukri Niemimuukko Date: Thu, 4 Jan 2024 11:21:09 +0200 Subject: [PATCH 1/3] support xe plugin resource type Signed-off-by: Ukri Niemimuukko --- .../pkg/gpuscheduler/scheduler.go | 54 ++++++++++++------- .../pkg/gpuscheduler/utils.go | 10 ++-- 2 files changed, 40 insertions(+), 24 deletions(-) diff --git a/gpu-aware-scheduling/pkg/gpuscheduler/scheduler.go b/gpu-aware-scheduling/pkg/gpuscheduler/scheduler.go index 70dfa12..12bac7c 100644 --- a/gpu-aware-scheduling/pkg/gpuscheduler/scheduler.go +++ b/gpu-aware-scheduling/pkg/gpuscheduler/scheduler.go @@ -46,9 +46,11 @@ const ( metadataAnnotations = "/metadata/annotations/" cardPrefix = "card" gpuListLabel = gpuPrefix + "cards" - gpuMonitoringResource = gpuPrefix + "i915_monitoring" + i915MonitoringResource = gpuPrefix + "i915_monitoring" + xeMonitoringResource = gpuPrefix + "xe_monitoring" gpuNumbersLabel = gpuPrefix + "gpu-numbers" - gpuPluginResource = gpuPrefix + "i915" + i915PluginResource = gpuPrefix + "i915" + xePluginResource = gpuPrefix + "xe" gpuTileResource = gpuPrefix + "tiles" numaMappingLabel = gpuPrefix + "numa-gpu-map" logL1 = klog.Level(1) @@ -258,21 +260,29 @@ func getPerGPUResourceCapacity(node *v1.Node, gpuCount int) resourceMap { func getPerGPUResourceRequest(containerRequest resourceMap) (resourceMap, int64) { perGPUResourceRequest := containerRequest.newCopy() - numI915 := getNumI915(containerRequest) + numGPUReq := getNumGPUReq(containerRequest) - if numI915 > 1 { - err := perGPUResourceRequest.divide(int(numI915)) + if numGPUReq > 1 { + err := perGPUResourceRequest.divide(int(numGPUReq)) if err != nil { return perGPUResourceRequest, 0 } } - return perGPUResourceRequest, numI915 + return perGPUResourceRequest, numGPUReq } -func getNumI915(containerRequest resourceMap) int64 { - if numI915, ok := containerRequest[gpuPluginResource]; ok && numI915 > 0 { - return numI915 +func getPluginResourceName(containerRequest resourceMap) string { + if numXe, ok := containerRequest[xePluginResource]; ok && numXe > 0 { + return xePluginResource + } + + return i915PluginResource +} + +func getNumGPUReq(containerRequest resourceMap) int64 { + if numGPUReq, ok := containerRequest[getPluginResourceName(containerRequest)]; ok && numGPUReq > 0 { + return numGPUReq } return 0 @@ -606,9 +616,9 @@ func (m *GASExtender) getXELinkedCardsForContainerGPURequest(containerRequest, p usedGPUmap := map[string]bool{} // figure out container resources per gpu - perGPUResourceRequest, numI915 := getPerGPUResourceRequest(containerRequest) + perGPUResourceRequest, numGPUReq := getPerGPUResourceRequest(containerRequest) - if numI915%2 != 0 { + if numGPUReq%2 != 0 { klog.Errorf("xe-linked allocations must have an even numbered gpu resource request") return []Card{}, preferred, errBadArgs @@ -616,7 +626,7 @@ func (m *GASExtender) getXELinkedCardsForContainerGPURequest(containerRequest, p preferredCard := "" - for gpuNum := int64(0); gpuNum < numI915; gpuNum += 2 { + for gpuNum := int64(0); gpuNum < numGPUReq; gpuNum += 2 { gpuNames := getSortedGPUNamesForNode(nodeResourcesUsed) if m.balancedResource != "" { @@ -764,7 +774,9 @@ func combineSamegpuResourceRequests(indexMap map[int]bool, resourceRequests []re } } - combinedResources[gpuPluginResource] = 1 + pluginResourceName := getPluginResourceName(combinedResources) + + combinedResources[pluginResourceName] = 1 return combinedResources, nil } @@ -975,10 +987,12 @@ func (m *GASExtender) getCardForSamegpu(samegpuIndexMap map[int]bool, 
allContain return []Card{}, false, fail } - // combinedResourcesRequest ends up with a hard-coded 1 i915 resource only, so we prune the gpuMapCopy, if needed - reallyNeededI915Resources := len(samegpuIndexMap) + gpuPluginResource := getPluginResourceName(combinedResourcesRequest) + + // combinedResourcesRequest ends up with a hard-coded 1 plugin resource only, so we prune the gpuMapCopy, if needed + reallyNeededPluginResources := len(samegpuIndexMap) for gpuName, gpuUsedResources := range nodeResourcesUsed { - if perGPUCapacity[gpuPluginResource]-gpuUsedResources[gpuPluginResource] < int64(reallyNeededI915Resources) { + if perGPUCapacity[gpuPluginResource]-gpuUsedResources[gpuPluginResource] < int64(reallyNeededPluginResources) { delete(gpuMapCopy, gpuName) } } @@ -1584,7 +1598,7 @@ func sanitizeSamegpuResourcesRequest( return nil } - samegpuProhibitedResources := []string{gpuTileResource, gpuMonitoringResource} + samegpuProhibitedResources := []string{gpuTileResource, i915MonitoringResource, xeMonitoringResource} for idx := range samegpuIndexMap { request := allResourceRequests[idx] @@ -1598,10 +1612,10 @@ func sanitizeSamegpuResourcesRequest( } } - if getNumI915(request) != samegpuMaxI915Request { + if getNumGPUReq(request) != samegpuMaxI915Request { klog.Errorf( - "Exactly one %v resource has to be requested for containers listed in %v annotation", - gpuPluginResource, samegpuAnnotationName) + "Exactly one %v or %v resource has to be requested for containers listed in %v annotation", + i915PluginResource, xePluginResource, samegpuAnnotationName) return errResConflict } diff --git a/gpu-aware-scheduling/pkg/gpuscheduler/utils.go b/gpu-aware-scheduling/pkg/gpuscheduler/utils.go index 6ba93eb..50e1c48 100644 --- a/gpu-aware-scheduling/pkg/gpuscheduler/utils.go +++ b/gpu-aware-scheduling/pkg/gpuscheduler/utils.go @@ -272,10 +272,12 @@ func hasGPUCapacity(node *v1.Node) bool { return false } - if quantity, ok := node.Status.Capacity[gpuPluginResource]; ok { - numI915, _ := quantity.AsInt64() - if numI915 > 0 { - return true + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + if quantity, ok := node.Status.Capacity[v1.ResourceName(pluginResourceName)]; ok { + numGPU, _ := quantity.AsInt64() + if numGPU > 0 { + return true + } } } From e05cdf0d895dd7b3b99bf8af21a0fa9a3ae17ed7 Mon Sep 17 00:00:00 2001 From: Ukri Niemimuukko Date: Wed, 13 Mar 2024 15:47:52 +0200 Subject: [PATCH 2/3] Tests for xe-resource Signed-off-by: Ukri Niemimuukko --- gpu-aware-scheduling/README.md | 4 +- .../scheduler-config-tas+gas.yaml | 2 + .../pkg/gpuscheduler/scheduler_test.go | 1692 +++++++++-------- .../pkg/gpuscheduler/utils_test.go | 136 +- 4 files changed, 941 insertions(+), 893 deletions(-) diff --git a/gpu-aware-scheduling/README.md b/gpu-aware-scheduling/README.md index 46eeb58..601073a 100644 --- a/gpu-aware-scheduling/README.md +++ b/gpu-aware-scheduling/README.md @@ -16,7 +16,7 @@ The typical use-case which GAS solves can be described with the following imagin 4) Kubernetes Scheduler, if left to its own decision making, can place all the PODs on this one node with only 2 GPUs, since it only considers the memory amount of 16GB. However to be able to do such a deployment and run the PODs successfully, the last instance would need to allocate the GPU memory from two of the GPUs. 5) GAS solves this issue by keeping book of the individual GPU memory amount. After two PODs have been deployed on the node, both GPUs have 3GB of free memory left. 
When GAS sees that the need for memory is 5GB but none of the GPUs in that node have as much (even though combined there is still 6GB left) it will filter out that node from the list of the nodes k8s scheduler proposed. The POD will not be deployed on that node. -GAS tries to be agnostic about resource types. It doesn't try to have an understanding of the meaning of the resources, they are just numbers to it, which it identifies from other Kubernetes extended resources with the prefix `gpu.intel.com/`. The only resource treated differently is the GPU-plugin `i915`-resource, which is considered to describe "from how many GPUs the GPU-resources for the POD should be evenly consumed". That is, if each GPU has e.g. capacity of 1000 `gpu.intel.com/millicores", and POD spec has a limit for two (2) `gpu.intel.com/i915` and 2000 "gpu.intel.com/millicores`, that POD will consume 1000 millicores from two GPUs, totaling 2000 millicores. After GAS has calculated the resource requirement per GPU by dividing the extended resource numbers with the number of requested `i915`, deploying the POD to a node is only allowed if there are enough resources in the node to satisfy fully the per-GPU resource requirement in as many GPUs as requested in `i915` resource. Typical PODs use just one `i915` and consume resources from only a single GPU. +GAS tries to be agnostic about resource types. It doesn't try to have an understanding of the meaning of the resources; they are just numbers to it, which it identifies from other Kubernetes extended resources with the prefix `gpu.intel.com/`. The only resources treated differently are the GPU-plugin `i915` and `xe` resources, which are considered to describe "from how many GPUs the GPU-resources for the POD should be evenly consumed". That is, if each GPU has e.g. a capacity of 1000 `gpu.intel.com/millicores`, and the POD spec has a limit of two (2) `gpu.intel.com/i915` and 2000 `gpu.intel.com/millicores`, that POD will consume 1000 millicores from two GPUs, totaling 2000 millicores. After GAS has calculated the resource requirement per GPU by dividing the extended resource numbers by the number of requested `i915` or `xe`, deploying the POD to a node is only allowed if there are enough resources in the node to fully satisfy the per-GPU resource requirement on as many GPUs as requested in the `i915` or `xe` resource. Typical PODs use just one `i915` or `xe` and consume resources from only a single GPU. Note that a Pod must request either `i915` or `xe` resources, never both. GAS heavily utilizes annotations. It itself annotates PODs after making filtering decisions on them, with a precise timestamp at annotation named "gas-ts". The timestamp can then be used for figuring out the time-order of the GAS-made scheduling decision for example during the GPU-plugin resource allocation phase, if the GPU-plugin wants to know the order of GPU-resource consuming POD deploying inside the node. Another annotation which GAS adds is "gas-container-cards". It will have the names of the cards selected for the containers. Containers are separated by "|", and card names are separated by ",". Thus a two-container POD in which both containers use 2 GPUs, could get an annotation "card0,card1|card2,card3". These annotations are then consumed by the Intel GPU device plugin. @@ -141,7 +141,7 @@ spec: ``` There is one change to the yaml here: -- A resources/limits entry requesting the resource `gpu.intel.com/i915` will make GAS take part in scheduling such deployment.
If this resource is not requested, GAS will not be used during scheduling of the pod. +- A resources/limits entry requesting the resource `gpu.intel.com/i915` will make GAS take part in scheduling such deployment. If this resource is not requested, GAS will not be used during scheduling of the pod. Note: the `gpu.intel.com/xe` resource is also supported and Pods using it will also be scheduled through GAS. ### Unsupported use-cases diff --git a/gpu-aware-scheduling/deploy/extender-configuration/scheduler-config-tas+gas.yaml b/gpu-aware-scheduling/deploy/extender-configuration/scheduler-config-tas+gas.yaml index 1ddb9b9..d3e6795 100755 --- a/gpu-aware-scheduling/deploy/extender-configuration/scheduler-config-tas+gas.yaml +++ b/gpu-aware-scheduling/deploy/extender-configuration/scheduler-config-tas+gas.yaml @@ -24,6 +24,8 @@ extenders: managedResources: - name: "gpu.intel.com/i915" ignoredByScheduler: false + - name: "gpu.intel.com/xe" + ignoredByScheduler: false ignorable: true tlsConfig: insecure: false diff --git a/gpu-aware-scheduling/pkg/gpuscheduler/scheduler_test.go b/gpu-aware-scheduling/pkg/gpuscheduler/scheduler_test.go index 6cd65c5..5047142 100644 --- a/gpu-aware-scheduling/pkg/gpuscheduler/scheduler_test.go +++ b/gpu-aware-scheduling/pkg/gpuscheduler/scheduler_test.go @@ -53,22 +53,22 @@ func getEmptyExtender() *GASExtender { return emptyExtender } -func getFakePod() *v1.Pod { +func getFakePod(pluginResourceName string) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"gas-ts": "1"}, }, - Spec: *getMockPodSpec(), + Spec: *getMockPodSpec(pluginResourceName), } } -func getMockPodSpec() *v1.PodSpec { +func getMockPodSpec(pluginResourceName string) *v1.PodSpec { return &v1.PodSpec{ Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), }, }, }, @@ -76,14 +76,14 @@ func getMockPodSpec() *v1.PodSpec { } } -func getMockPodSpecWithTile(tileCount int) *v1.PodSpec { +func getMockPodSpecWithTile(tileCount int, pluginResourceName string) *v1.PodSpec { return &v1.PodSpec{ Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse(strconv.Itoa(tileCount)), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse(strconv.Itoa(tileCount)), }, }, }, @@ -91,22 +91,22 @@ func getMockPodSpecWithTile(tileCount int) *v1.PodSpec { } } -func getMockPodSpecMultiCont() *v1.PodSpec { +func getMockPodSpecMultiCont(pluginResourceName string) *v1.PodSpec { return &v1.PodSpec{ Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("3"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("3"), }, }, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("1"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("1"), }, }, }, @@ -114,14 +114,14 @@ func getMockPodSpecMultiCont() *v1.PodSpec { } } -func getMockPodSpecMultiContXeLinked(containerCount int) *v1.PodSpec { +func 
getMockPodSpecMultiContXeLinked(containerCount int, pluginResourceName string) *v1.PodSpec { containers := []v1.Container{} for i := 0; i < containerCount; i++ { containers = append(containers, v1.Container{ Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("2"), - "gpu.intel.com/tiles": resource.MustParse("2"), + v1.ResourceName(pluginResourceName): resource.MustParse("2"), + "gpu.intel.com/tiles": resource.MustParse("2"), }, }, }) @@ -132,7 +132,7 @@ func getMockPodSpecMultiContXeLinked(containerCount int) *v1.PodSpec { } } -func getMockPodSpecNCont(containerCount int) *v1.PodSpec { +func getMockPodSpecNCont(containerCount int, pluginResourceName string) *v1.PodSpec { containers := []v1.Container{} for i := 1; i <= containerCount; i++ { @@ -140,8 +140,8 @@ func getMockPodSpecNCont(containerCount int) *v1.PodSpec { Name: fmt.Sprintf("container%d", i), Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/millicores": resource.MustParse("100"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/millicores": resource.MustParse("100"), }, }, } @@ -153,15 +153,15 @@ func getMockPodSpecNCont(containerCount int) *v1.PodSpec { } } -func getMockPodSpecMultiContSamegpu() *v1.PodSpec { +func getMockPodSpecMultiContSamegpu(pluginResourceName string) *v1.PodSpec { return &v1.PodSpec{ Containers: []v1.Container{ { Name: "container1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("2"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("2"), }, }, }, @@ -169,8 +169,8 @@ func getMockPodSpecMultiContSamegpu() *v1.PodSpec { Name: "container2", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/memory.max": resource.MustParse("8589934592"), // 8Gi + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/memory.max": resource.MustParse("8589934592"), // 8Gi }, }, }, @@ -178,8 +178,8 @@ func getMockPodSpecMultiContSamegpu() *v1.PodSpec { Name: "container3", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/millicores": resource.MustParse("200"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/millicores": resource.MustParse("200"), }, }, }, @@ -187,8 +187,8 @@ func getMockPodSpecMultiContSamegpu() *v1.PodSpec { Name: "container4", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/millicores": resource.MustParse("200"), + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/millicores": resource.MustParse("200"), }, }, }, @@ -196,7 +196,7 @@ func getMockPodSpecMultiContSamegpu() *v1.PodSpec { } } -func getMockNode(sharedDevCount, tileCountPerCard int, cardNames ...string) *v1.Node { +func getMockNode(sharedDevCount, tileCountPerCard int, pluginResourceName string, cardNames ...string) *v1.Node { if len(cardNames) == 0 { cardNames = []string{card0} } @@ -214,9 +214,9 @@ func getMockNode(sharedDevCount, tileCountPerCard int, cardNames ...string) *v1. 
cardCount := strconv.Itoa(len(cardNames) * sharedDevCount) tileCount := strconv.Itoa(len(cardNames) * tileCountPerCard) - node.Status.Capacity["gpu.intel.com/i915"] = resource.MustParse(cardCount) + node.Status.Capacity[v1.ResourceName(pluginResourceName)] = resource.MustParse(cardCount) node.Status.Capacity["gpu.intel.com/tiles"] = resource.MustParse(tileCount) - node.Status.Allocatable["gpu.intel.com/i915"] = resource.MustParse(cardCount) + node.Status.Allocatable[v1.ResourceName(pluginResourceName)] = resource.MustParse(cardCount) node.Status.Allocatable["gpu.intel.com/tiles"] = resource.MustParse(tileCount) delim := "" @@ -350,198 +350,206 @@ func TestFilterNodes(t *testing.T) { } func TestBindNode(t *testing.T) { - pod := getFakePod() + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) - gas := getDummyExtender(pod) + gas := getDummyExtender(pod) - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - ctx := context.TODO() - - Convey("When the args are empty", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(nil, errMock).Once() - result := gas.bindNode(ctx, &args) - So(result.Error, ShouldNotEqual, "") - }) - - args.Node = nodename - - Convey("When node can't be read", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{}, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(nil, errMock).Once() - result := gas.bindNode(ctx, &args) - So(result.Error, ShouldNotBeNil) - }) - - Convey("When node can be read, but has no capacity", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpec(), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": card0, - }, - }, - }, nil).Once() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}, nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - result := gas.bindNode(ctx, &args) - So(result.Error, ShouldEqual, "will not fit") - }) - - Convey("When node can be read, and it has capacity", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpec(), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(getMockNode(1, 1), nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}, nil).Once() - result := gas.bindNode(ctx, &args) - So(result.Error, ShouldEqual, "") - }) - - Convey("When pod has invalid UID", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpec(), - ObjectMeta: metav1.ObjectMeta{ - UID: "foobar", - }, - }, nil).Once() - result := gas.bindNode(ctx, &args) - So(result.Error, ShouldNotEqual, "") - }) - - iCache = origCacheAPI -} + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + ctx := 
context.TODO() -func TestAllowlist(t *testing.T) { - pod := getFakePod() + Convey("When the args are empty", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(nil, errMock).Once() + result := gas.bindNode(ctx, &args) + So(result.Error, ShouldNotEqual, "") + }) - gas := getDummyExtender(pod) - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename - ctx := context.TODO() + args.Node = nodename - for _, cardName := range []string{card0, "card1"} { - cardName := cardName + Convey("When node can't be read", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{}, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(nil, errMock).Once() + result := gas.bindNode(ctx, &args) + So(result.Error, ShouldNotBeNil) + }) - Convey("When pod has an allowlist and the node card is in it", t, func() { + Convey("When node can be read, but has no capacity", t, func() { mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpec(pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"gas-allow": cardName}, + Labels: map[string]string{ + "gpu.intel.com/cards": card0, + }, }, - Spec: *getMockPodSpec(), }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(getMockNode(1, 1), nil).Once() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}, nil).Once() mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() result := gas.bindNode(ctx, &args) - if cardName == card0 { - So(result.Error, ShouldEqual, "") - } else { - So(result.Error, ShouldEqual, "will not fit") - } + So(result.Error, ShouldEqual, "will not fit") }) - } - - iCache = origCacheAPI -} - -func TestDenylist(t *testing.T) { - pod := getFakePod() - - gas := getDummyExtender(pod) - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename - ctx := context.TODO() - - for _, cardName := range []string{card0, "card1"} { - cardName := cardName - Convey("When pod has a denylist", t, func() { + Convey("When node can be read, and it has capacity", t, func() { mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"gas-deny": cardName}, - }, - Spec: *getMockPodSpec(), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"gpu.intel.com/cards": card0}}, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")}, - Allocatable: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")}, - }, + Spec: *getMockPodSpec(pluginResourceName), }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(getMockNode(1, 1, pluginResourceName), nil).Once() mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, 
nil).Once() mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}, nil).Once() result := gas.bindNode(ctx, &args) - if cardName != card0 { - So(result.Error, ShouldEqual, "") - } else { - So(result.Error, ShouldEqual, "will not fit") - } + So(result.Error, ShouldEqual, "") }) - } - iCache = origCacheAPI -} - -func TestGPUDisabling(t *testing.T) { - pod := getFakePod() - - gas := getDummyExtender(pod) - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename - ctx := context.TODO() - - for _, labelValue := range []string{pciGroupValue, trueValueString} { - labelValue := labelValue - - Convey("When node has a disable-label and the node card is in it", t, func() { + Convey("When pod has invalid UID", t, func() { mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpec(), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + Spec: *getMockPodSpec(pluginResourceName), ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": card0, - tasNSPrefix + policy + gpuDisableLabelPrefix + card0: labelValue, - pciGroupLabel: "0", - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")}, - Allocatable: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")}, + UID: "foobar", }, }, nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() result := gas.bindNode(ctx, &args) - So(result.Error, ShouldEqual, "will not fit") + So(result.Error, ShouldNotEqual, "") }) + + iCache = origCacheAPI } +} - iCache = origCacheAPI +func TestAllowlist(t *testing.T) { + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + + gas := getDummyExtender(pod) + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + ctx := context.TODO() + + for _, cardName := range []string{card0, "card1"} { + cardName := cardName + + Convey("When pod has an allowlist and the node card is in it", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"gas-allow": cardName}, + }, + Spec: *getMockPodSpec(pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(getMockNode(1, 1, pluginResourceName), nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + mockCache.On("GetNodeTileStatus", 
mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + result := gas.bindNode(ctx, &args) + if cardName == card0 { + So(result.Error, ShouldEqual, "") + } else { + So(result.Error, ShouldEqual, "will not fit") + } + }) + } + + iCache = origCacheAPI + } +} + +func TestDenylist(t *testing.T) { + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + + gas := getDummyExtender(pod) + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + ctx := context.TODO() + + for _, cardName := range []string{card0, "card1"} { + cardName := cardName + + Convey("When pod has a denylist", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"gas-deny": cardName}, + }, + Spec: *getMockPodSpec(pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"gpu.intel.com/cards": card0}}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{v1.ResourceName(pluginResourceName): resource.MustParse("1")}, + Allocatable: v1.ResourceList{v1.ResourceName(pluginResourceName): resource.MustParse("1")}, + }, + }, nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + result := gas.bindNode(ctx, &args) + if cardName != card0 { + So(result.Error, ShouldEqual, "") + } else { + So(result.Error, ShouldEqual, "will not fit") + } + }) + } + + iCache = origCacheAPI + } +} + +func TestGPUDisabling(t *testing.T) { + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + + gas := getDummyExtender(pod) + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + ctx := context.TODO() + + for _, labelValue := range []string{pciGroupValue, trueValueString} { + labelValue := labelValue + + Convey("When node has a disable-label and the node card is in it", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpec(pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/cards": card0, + tasNSPrefix + policy + gpuDisableLabelPrefix + card0: labelValue, + pciGroupLabel: "0", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{v1.ResourceName(pluginResourceName): resource.MustParse("1")}, + Allocatable: v1.ResourceList{v1.ResourceName(pluginResourceName): resource.MustParse("1")}, + }, + }, nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + result := gas.bindNode(ctx, &args) + So(result.Error, 
ShouldEqual, "will not fit") + }) + } + + iCache = origCacheAPI + } } func TestWriteResponse(t *testing.T) { @@ -568,47 +576,49 @@ func TestDecodeRequest(t *testing.T) { } func TestPreferredGPU(t *testing.T) { - gas := getEmptyExtender() - node := getMockNode(1, 1, card0, "card1", "card2") + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + gas := getEmptyExtender() + node := getMockNode(1, 1, pluginResourceName, card0, "card1", "card2") - pod := getFakePod() + pod := getFakePod(pluginResourceName) - containerRequest := resourceMap{"gpu.intel.com/i915": 1} - perGPUCapacity := resourceMap{"gpu.intel.com/i915": 1} + containerRequest := resourceMap{pluginResourceName: 1} + perGPUCapacity := resourceMap{pluginResourceName: 1} - nodeResourcesUsed := nodeResources{card0: resourceMap{}, "card1": resourceMap{}, "card2": resourceMap{}} - gpuMap := map[string]bool{card0: true, "card1": true, "card2": true} + nodeResourcesUsed := nodeResources{card0: resourceMap{}, "card1": resourceMap{}, "card2": resourceMap{}} + gpuMap := map[string]bool{card0: true, "card1": true, "card2": true} - Convey("When a gpu is not preferred, alphabetically first gpu should be selected", t, func() { - cards, preferred, err := gas.getCardsForContainerGPURequest(containerRequest, perGPUCapacity, - node, pod, - nodeResourcesUsed, - gpuMap) + Convey("When a gpu is not preferred, alphabetically first gpu should be selected", t, func() { + cards, preferred, err := gas.getCardsForContainerGPURequest(containerRequest, perGPUCapacity, + node, pod, + nodeResourcesUsed, + gpuMap) - So(len(cards), ShouldEqual, 1) - So(cards[0], ShouldResemble, Card{ - gpuName: card0, - xeLinkedTileIds: []int{}, + So(len(cards), ShouldEqual, 1) + So(cards[0], ShouldResemble, Card{ + gpuName: card0, + xeLinkedTileIds: []int{}, + }) + So(err, ShouldBeNil) + So(preferred, ShouldBeFalse) }) - So(err, ShouldBeNil) - So(preferred, ShouldBeFalse) - }) - Convey("When a gpu is preferred, it should be selected", t, func() { - node.Labels["telemetry.aware.scheduling.policy/gas-prefer-gpu"] = "card2" - cards, preferred, err := gas.getCardsForContainerGPURequest(containerRequest, perGPUCapacity, - node, pod, - nodeResourcesUsed, - gpuMap) - - So(len(cards), ShouldEqual, 1) - So(cards[0], ShouldResemble, Card{ - gpuName: "card2", - xeLinkedTileIds: []int{}, + Convey("When a gpu is preferred, it should be selected", t, func() { + node.Labels["telemetry.aware.scheduling.policy/gas-prefer-gpu"] = "card2" + cards, preferred, err := gas.getCardsForContainerGPURequest(containerRequest, perGPUCapacity, + node, pod, + nodeResourcesUsed, + gpuMap) + + So(len(cards), ShouldEqual, 1) + So(cards[0], ShouldResemble, Card{ + gpuName: "card2", + xeLinkedTileIds: []int{}, + }) + So(err, ShouldBeNil) + So(preferred, ShouldBeTrue) }) - So(err, ShouldBeNil) - So(preferred, ShouldBeTrue) - }) + } } func TestFilter(t *testing.T) { @@ -812,242 +822,252 @@ func TestCreateTileAnnotation(t *testing.T) { } func TestArrangeGPUNamesPerResourceAvailibility(t *testing.T) { - nodeUsedRes := nodeResources{} + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + nodeUsedRes := nodeResources{} - nodeUsedRes[card0] = resourceMap{"gpu.intel.com/i915": 1, "gpu.intel.com/tiles": 2} - nodeUsedRes["card1"] = resourceMap{"gpu.intel.com/i915": 0, "gpu.intel.com/tiles": 0} - nodeUsedRes["card2"] = resourceMap{"gpu.intel.com/i915": 1, "gpu.intel.com/tiles": 1} + nodeUsedRes[card0] = resourceMap{pluginResourceName: 1, "gpu.intel.com/tiles": 
2} + nodeUsedRes["card1"] = resourceMap{pluginResourceName: 0, "gpu.intel.com/tiles": 0} + nodeUsedRes["card2"] = resourceMap{pluginResourceName: 1, "gpu.intel.com/tiles": 1} - Convey("When arranging gpus by tiles, the one with least used tiles is at front", t, func() { - gpuNames := []string{card0, "card1", "card2"} + Convey("When arranging gpus by tiles, the one with least used tiles is at front", t, func() { + gpuNames := []string{card0, "card1", "card2"} - arrangeGPUNamesPerResourceAvailability(nodeUsedRes, gpuNames, "tiles") - So(gpuNames[0], ShouldEqual, "card1") - So(gpuNames[1], ShouldEqual, "card2") - So(gpuNames[2], ShouldEqual, card0) - }) + arrangeGPUNamesPerResourceAvailability(nodeUsedRes, gpuNames, "tiles") + So(gpuNames[0], ShouldEqual, "card1") + So(gpuNames[1], ShouldEqual, "card2") + So(gpuNames[2], ShouldEqual, card0) + }) - Convey("When arranging gpus by unknown, the order of the gpus shouldn't change", t, func() { - gpuNames := []string{card0, "card1", "card2"} + Convey("When arranging gpus by unknown, the order of the gpus shouldn't change", t, func() { + gpuNames := []string{card0, "card1", "card2"} - arrangeGPUNamesPerResourceAvailability(nodeUsedRes, gpuNames, "unknown") - So(gpuNames[0], ShouldEqual, card0) - So(gpuNames[1], ShouldEqual, "card1") - So(gpuNames[2], ShouldEqual, "card2") - }) + arrangeGPUNamesPerResourceAvailability(nodeUsedRes, gpuNames, "unknown") + So(gpuNames[0], ShouldEqual, card0) + So(gpuNames[1], ShouldEqual, "card1") + So(gpuNames[2], ShouldEqual, "card2") + }) + } } func TestResourceBalancedCardsForContainerGPURequest(t *testing.T) { - gas := getEmptyExtender() - gas.balancedResource = "foo" - node := getMockNode(1, 1, card0, "card1", "card2") + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + gas := getEmptyExtender() + gas.balancedResource = "foo" + node := getMockNode(1, 1, card0, "card1", "card2") - pod := getFakePod() + pod := getFakePod(pluginResourceName) - containerRequest := resourceMap{"gpu.intel.com/i915": 1, "gpu.intel.com/foo": 1} - perGPUCapacity := resourceMap{"gpu.intel.com/i915": 1, "gpu.intel.com/foo": 4} + containerRequest := resourceMap{pluginResourceName: 1, "gpu.intel.com/foo": 1} + perGPUCapacity := resourceMap{pluginResourceName: 1, "gpu.intel.com/foo": 4} - nodeResourcesUsed := nodeResources{ - card0: resourceMap{"gpu.intel.com/foo": 1}, - "card1": resourceMap{"gpu.intel.com/foo": 2}, "card2": resourceMap{}, - } - gpuMap := map[string]bool{card0: true, "card1": true, "card2": true} - - Convey("When GPUs are resource balanced, the least consumed GPU should be used", t, func() { - cards, preferred, err := gas.getCardsForContainerGPURequest(containerRequest, perGPUCapacity, - node, pod, - nodeResourcesUsed, - gpuMap) - - So(len(cards), ShouldEqual, 1) - So(cards[0], ShouldResemble, Card{ - gpuName: "card2", - xeLinkedTileIds: []int{}, + nodeResourcesUsed := nodeResources{ + card0: resourceMap{"gpu.intel.com/foo": 1}, + "card1": resourceMap{"gpu.intel.com/foo": 2}, "card2": resourceMap{}, + } + gpuMap := map[string]bool{card0: true, "card1": true, "card2": true} + + Convey("When GPUs are resource balanced, the least consumed GPU should be used", t, func() { + cards, preferred, err := gas.getCardsForContainerGPURequest(containerRequest, perGPUCapacity, + node, pod, + nodeResourcesUsed, + gpuMap) + + So(len(cards), ShouldEqual, 1) + So(cards[0], ShouldResemble, Card{ + gpuName: "card2", + xeLinkedTileIds: []int{}, + }) + So(err, ShouldBeNil) + So(preferred, ShouldBeFalse) }) - 
So(err, ShouldBeNil) - So(preferred, ShouldBeFalse) - }) + } } func TestFilterWithXeLinkedDisabledTiles(t *testing.T) { - pod := getFakePod() - pod.Spec = *getMockPodSpecMultiContXeLinked(1) - pod.Annotations[xelinkAnnotationName] = trueValueString + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + pod.Spec = *getMockPodSpecMultiContXeLinked(1, pluginResourceName) + pod.Annotations[xelinkAnnotationName] = trueValueString + + clientset := fake.NewSimpleClientset(pod) + gas := NewGASExtender(clientset, false, false, "") + + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + + type testCase struct { + extraLabels map[string]string + description string + expectedResult bool + } - clientset := fake.NewSimpleClientset(pod) - gas := NewGASExtender(clientset, false, false, "") + testCases := []testCase{ + { + description: "when one tile is disabled and there is one good xe-link left", + extraLabels: map[string]string{tasNSPrefix + policy + tileDisableLabelPrefix + card0Gt0: trueValueString}, + expectedResult: false, // node does not fail (is not filtered) + }, + { + description: "when two tiles are disabled and there are no good xe-links left", + extraLabels: map[string]string{ + tasNSPrefix + policy + tileDisableLabelPrefix + card0Gt0: trueValueString, + tasNSPrefix + policy + tileDisableLabelPrefix + "card2_gt1": trueValueString, + }, + expectedResult: true, // node fails (is filtered) + }, + } - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename + Convey("When node has four cards with two xelinks and one disabled xe-linked tile, pod should still fit", t, func() { + for _, testCase := range testCases { + t.Logf("test %v", testCase.description) + + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpecWithTile(1, pluginResourceName), + }, nil).Once() + node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/gpu-numbers": "0.1.2.3", + "gpu.intel.com/tiles": "4", + xeLinksLabel: "0.0-1.0_2.1", + xeLinksLabel + "2": "Z-3.2", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("4"), + "gpu.intel.com/tiles": resource.MustParse("16"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("4"), + "gpu.intel.com/tiles": resource.MustParse("16"), + }, + }, + } + for key, value := range testCase.extraLabels { + node.Labels[key] = value + } + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&node, nil).Once() - type testCase struct { - extraLabels map[string]string - description string - expectedResult bool - } + usedResources := nodeResources{card0: resourceMap{pluginResourceName: 0, "gpu.intel.com/tiles": 0}} - testCases := []testCase{ - { - description: "when one tile is disabled and there is one good xe-link left", - extraLabels: map[string]string{tasNSPrefix + policy + tileDisableLabelPrefix + card0Gt0: trueValueString}, - expectedResult: false, // node does not fail (is not filtered) - }, - { - description: "when two tiles are disabled and there are no good xe-links left", - extraLabels: map[string]string{ - tasNSPrefix + policy + tileDisableLabelPrefix + card0Gt0: trueValueString, - tasNSPrefix + policy + tileDisableLabelPrefix + 
"card2_gt1": trueValueString, - }, - expectedResult: true, // node fails (is filtered) - }, + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Twice() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + nodeNames := []string{nodename} + args := extenderv1.ExtenderArgs{} + args.NodeNames = &nodeNames + args.Pod = pod + + result := gas.filterNodes(&args) + So(result.Error, ShouldEqual, "") + _, ok := result.FailedNodes[nodename] + So(ok, ShouldEqual, testCase.expectedResult) + } + }) + + iCache = origCacheAPI } +} - Convey("When node has four cards with two xelinks and one disabled xe-linked tile, pod should still fit", t, func() { - for _, testCase := range testCases { - t.Logf("test %v", testCase.description) +func TestFilterWithNContainerSameGPU(t *testing.T) { + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + pod.Spec = *getMockPodSpecNCont(5, pluginResourceName) + pod.Annotations[samegpuAnnotationName] = "container1,container2,container3,container4,container5" + + clientset := fake.NewSimpleClientset(pod) + gas := NewGASExtender(clientset, false, false, "") + + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + + type testCase struct { + extraLabels map[string]string + description string + expectedResult bool + } - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpecWithTile(1), - }, nil).Once() - node := v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/gpu-numbers": "0.1.2.3", - "gpu.intel.com/tiles": "4", - xeLinksLabel: "0.0-1.0_2.1", - xeLinksLabel + "2": "Z-3.2", - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("4"), - "gpu.intel.com/tiles": resource.MustParse("16"), + testCases := []testCase{ + { + description: "when there are 3 plugin resources left in cards, pod with 5 same-gpu containers should not fit", + expectedResult: true, + }, + } + + Convey("When node has 3 plugin resources left in cards, pod should not fit", t, func() { + for _, testCase := range testCases { + t.Logf("test %v", testCase.description) + node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/gpu-numbers": "0.1", + }, }, - Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("4"), - "gpu.intel.com/tiles": resource.MustParse("16"), + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("16"), + "gpu.intel.com/millicores": resource.MustParse("2000"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("16"), + "gpu.intel.com/millicores": resource.MustParse("2000"), + }, }, - }, - } - for key, value := range testCase.extraLabels { - node.Labels[key] = value - } - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&node, nil).Once() + } + for key, value := range testCase.extraLabels { + node.Labels[key] = value + } + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&node, nil).Once() - 
usedResources := nodeResources{card0: resourceMap{"gpu.intel.com/i915": 0, "gpu.intel.com/tiles": 0}} + usedResources := nodeResources{ + card0: resourceMap{pluginResourceName: 5, "gpu.intel.com/millicores": 500}, + "card1": resourceMap{pluginResourceName: 5, "gpu.intel.com/millicores": 500}, + } - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() - nodeNames := []string{nodename} - args := extenderv1.ExtenderArgs{} - args.NodeNames = &nodeNames - args.Pod = pod + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + nodeNames := []string{nodename} + args := extenderv1.ExtenderArgs{} + args.NodeNames = &nodeNames + args.Pod = pod - result := gas.filterNodes(&args) - So(result.Error, ShouldEqual, "") - _, ok := result.FailedNodes[nodename] - So(ok, ShouldEqual, testCase.expectedResult) - } - }) + result := gas.filterNodes(&args) + So(result.Error, ShouldEqual, "") + _, ok := result.FailedNodes[nodename] + So(ok, ShouldEqual, testCase.expectedResult) + } + }) - iCache = origCacheAPI + iCache = origCacheAPI + } } -func TestFilterWithNContainerSameGPU(t *testing.T) { - pod := getFakePod() - pod.Spec = *getMockPodSpecNCont(5) - pod.Annotations[samegpuAnnotationName] = "container1,container2,container3,container4,container5" - - clientset := fake.NewSimpleClientset(pod) - gas := NewGASExtender(clientset, false, false, "") +func runSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t *testing.T, pluginResourceName string) { + t.Helper() - mockCache := MockCacheAPI{} + ctx := context.TODO() origCacheAPI := iCache - iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} args.Node = nodename type testCase struct { - extraLabels map[string]string - description string - expectedResult bool - } - - testCases := []testCase{ - { - description: "when there are 3 i915 left in cards, pod with 5 same-gpu containers should not fit", - expectedResult: true, - }, - } - - Convey("When node has 3 i915 left in cards, pod should not fit", t, func() { - for _, testCase := range testCases { - t.Logf("test %v", testCase.description) - node := v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/gpu-numbers": "0.1", - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("16"), - "gpu.intel.com/millicores": resource.MustParse("2000"), - }, - Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("16"), - "gpu.intel.com/millicores": resource.MustParse("2000"), - }, - }, - } - for key, value := range testCase.extraLabels { - node.Labels[key] = value - } - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&node, nil).Once() - - usedResources := nodeResources{ - card0: resourceMap{"gpu.intel.com/i915": 5, "gpu.intel.com/millicores": 500}, - "card1": resourceMap{"gpu.intel.com/i915": 5, "gpu.intel.com/millicores": 500}, - } - - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() - mockCache.On("GetNodeTileStatus", mock.Anything, 
mock.Anything).Return(nodeTiles{}).Once() - nodeNames := []string{nodename} - args := extenderv1.ExtenderArgs{} - args.NodeNames = &nodeNames - args.Pod = pod - - result := gas.filterNodes(&args) - So(result.Error, ShouldEqual, "") - _, ok := result.FailedNodes[nodename] - So(ok, ShouldEqual, testCase.expectedResult) - } - }) - - iCache = origCacheAPI -} - -func TestRunSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t *testing.T) { - ctx := context.TODO() - origCacheAPI := iCache - - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename - - type testCase struct { - extraLabels map[string]string - extraAnnotations map[string]string - description string - expectedCardAnnotation string - expectTimestamp bool - expectError bool - defaultTileCheck bool + extraLabels map[string]string + extraAnnotations map[string]string + description string + expectedCardAnnotation string + expectTimestamp bool + expectError bool + defaultTileCheck bool } testCases := []testCase{ @@ -1124,9 +1144,9 @@ func TestRunSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t *testing. for _, testCase := range testCases { t.Logf("test %v", testCase.description) - pod := getFakePod() - mockNode := getMockNode(4, 4, card0, "card1", "card2", "card3") - pod.Spec = *getMockPodSpecMultiContXeLinked(2) + pod := getFakePod(pluginResourceName) + mockNode := getMockNode(4, 4, pluginResourceName, card0, "card1", "card2", "card3") + pod.Spec = *getMockPodSpecMultiContXeLinked(2, pluginResourceName) clientset := fake.NewSimpleClientset(pod) iCache = origCacheAPI @@ -1134,7 +1154,7 @@ func TestRunSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t *testing. mockCache := MockCacheAPI{} iCache = &mockCache - nodeRes := nodeResources{card0: resourceMap{"gpu.intel.com/i915": 0, "gpu.intel.com/tiles": 0}} + nodeRes := nodeResources{card0: resourceMap{pluginResourceName: 0, "gpu.intel.com/tiles": 0}} noTilesInUse := nodeTiles{card0: []int{}} for key, value := range testCase.extraLabels { @@ -1206,491 +1226,511 @@ func TestRunSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t *testing. 
iCache = origCacheAPI } +func TestRunSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t *testing.T) { + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + runSchedulingLogicWithMultiContainerXelinkedTileResourceReq(t, pluginResourceName) + } +} + func TestRunSchedulingLogicWithMultiContainerTileResourceReq(t *testing.T) { - pod := getFakePod() + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) - clientset := fake.NewSimpleClientset(pod) - gas := NewGASExtender(clientset, false, false, "tiles") - mockNode := getMockNode(4, 4, card0) + clientset := fake.NewSimpleClientset(pod) + gas := NewGASExtender(clientset, false, false, "tiles") + mockNode := getMockNode(4, 4, pluginResourceName, card0) - pod.Spec = *getMockPodSpecMultiCont() + pod.Spec = *getMockPodSpecMultiCont(pluginResourceName) - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename - nodeRes := nodeResources{card0: resourceMap{"gpu.intel.com/i915": 0, "gpu.intel.com/tiles": 0}} - noTilesInUse := nodeTiles{card0: []int{}} + nodeRes := nodeResources{card0: resourceMap{pluginResourceName: 0, "gpu.intel.com/tiles": 0}} + noTilesInUse := nodeTiles{card0: []int{}} - ctx := context.TODO() + ctx := context.TODO() - Convey("When running scheduling logic with multi-container pod with tile request", t, func() { - cardAnnotation := "" - tileAnnotation := "" - timestampFound := false - applyCheck := func(action k8stesting.Action) (bool, runtime.Object, error) { - patchAction, _ := action.(k8stesting.PatchAction) - patch := patchAction.GetPatch() - - arr := []patchValue{} - merr := json.Unmarshal(patch, &arr) - if merr != nil { - return false, nil, fmt.Errorf("error %w", merr) - } + Convey("When running scheduling logic with multi-container pod with tile request", t, func() { + cardAnnotation := "" + tileAnnotation := "" + timestampFound := false + applyCheck := func(action k8stesting.Action) (bool, runtime.Object, error) { + patchAction, _ := action.(k8stesting.PatchAction) + patch := patchAction.GetPatch() + + arr := []patchValue{} + merr := json.Unmarshal(patch, &arr) + if merr != nil { + return false, nil, fmt.Errorf("error %w", merr) + } - for _, patch := range arr { - switch { - case strings.Contains(patch.Path, tsAnnotationName): - timestampFound = true - case strings.Contains(patch.Path, cardAnnotationName): - cardAnnotation, _ = patch.Value.(string) - case strings.Contains(patch.Path, tileAnnotationName): - tileAnnotation, _ = patch.Value.(string) + for _, patch := range arr { + switch { + case strings.Contains(patch.Path, tsAnnotationName): + timestampFound = true + case strings.Contains(patch.Path, cardAnnotationName): + cardAnnotation, _ = patch.Value.(string) + case strings.Contains(patch.Path, tileAnnotationName): + tileAnnotation, _ = patch.Value.(string) + } } + + return true, nil, nil } - return true, nil, nil - } + mockCache.On("FetchNode", mock.Anything, mock.Anything).Return(mockNode, nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeRes).Once() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse) + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(pod, nil).Once() + 
mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - mockCache.On("FetchNode", mock.Anything, mock.Anything).Return(mockNode, nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeRes).Once() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse) - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(pod, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, - mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - - clientset.Fake.PrependReactor("patch", "pods", applyCheck) - result := gas.bindNode(ctx, &args) - clientset.Fake.ReactionChain = clientset.Fake.ReactionChain[1:] - - So(cardAnnotation, ShouldEqual, "card0|card0") - split := strings.Split(tileAnnotation, "|") - // Check the tile split between containers - So(len(split), ShouldEqual, 2) - So(strings.Count(split[0], "gt"), ShouldEqual, 3) - So(strings.Count(split[1], "gt"), ShouldEqual, 1) - // NOTE: tile annotation should include all the available tiles. If one or - // more tiles are used twice then the tested code isn't working correctly - So(strings.Contains(tileAnnotation, "gt0"), ShouldEqual, true) - So(strings.Contains(tileAnnotation, "gt1"), ShouldEqual, true) - So(strings.Contains(tileAnnotation, "gt2"), ShouldEqual, true) - So(strings.Contains(tileAnnotation, "gt3"), ShouldEqual, true) - - So(timestampFound, ShouldEqual, true) - So(result.Error, ShouldEqual, "") - }) + clientset.Fake.PrependReactor("patch", "pods", applyCheck) + result := gas.bindNode(ctx, &args) + clientset.Fake.ReactionChain = clientset.Fake.ReactionChain[1:] - iCache = origCacheAPI + So(cardAnnotation, ShouldEqual, "card0|card0") + split := strings.Split(tileAnnotation, "|") + // Check the tile split between containers + So(len(split), ShouldEqual, 2) + So(strings.Count(split[0], "gt"), ShouldEqual, 3) + So(strings.Count(split[1], "gt"), ShouldEqual, 1) + // NOTE: tile annotation should include all the available tiles. 
If one or + // more tiles are used twice then the tested code isn't working correctly + So(strings.Contains(tileAnnotation, "gt0"), ShouldEqual, true) + So(strings.Contains(tileAnnotation, "gt1"), ShouldEqual, true) + So(strings.Contains(tileAnnotation, "gt2"), ShouldEqual, true) + So(strings.Contains(tileAnnotation, "gt3"), ShouldEqual, true) + + So(timestampFound, ShouldEqual, true) + So(result.Error, ShouldEqual, "") + }) + + iCache = origCacheAPI + } } func TestTileDisablingDeschedulingAndPreference(t *testing.T) { - pod := getFakePod() - - clientset := fake.NewSimpleClientset(pod) - gas := NewGASExtender(clientset, false, false, "") - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename - ctx := context.TODO() + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + + clientset := fake.NewSimpleClientset(pod) + gas := NewGASExtender(clientset, false, false, "") + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + ctx := context.TODO() + + for _, labelPart := range []string{tileDisableLabelPrefix, tileDeschedLabelPrefix} { + Convey("When node has a tile disabled/descheduled-label and the node card is in it", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpecWithTile(1, pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/cards": card0, + "gpu.intel.com/tiles": "1", + tasNSPrefix + policy + labelPart + card0Gt0: trueValueString, + pciGroupLabel: "0", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("1"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("1"), + }, + }, + }, nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + noTilesInUse := nodeTiles{card0: []int{}} + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Once() + + result := gas.bindNode(ctx, &args) + So(result.Error, ShouldEqual, "will not fit") + }) + } - for _, labelPart := range []string{tileDisableLabelPrefix, tileDeschedLabelPrefix} { - Convey("When node has a tile disabled/descheduled-label and the node card is in it", t, func() { + Convey("When node has a tile descheduled label but another card to use", t, func() { mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpecWithTile(1), + Spec: *getMockPodSpecWithTile(1, pluginResourceName), }, nil).Once() mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - "gpu.intel.com/cards": card0, - "gpu.intel.com/tiles": "1", - tasNSPrefix + policy + labelPart + card0Gt0: trueValueString, - pciGroupLabel: "0", + "gpu.intel.com/cards": "card0.card1", + "gpu.intel.com/tiles": "2", + tasNSPrefix + policy + tileDeschedLabelPrefix + 
"card1_gt0": trueValueString, + pciGroupLabel: "0", }, }, Status: v1.NodeStatus{ Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("1"), + v1.ResourceName(pluginResourceName): resource.MustParse("2"), + "gpu.intel.com/tiles": resource.MustParse("2"), }, Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("1"), + v1.ResourceName(pluginResourceName): resource.MustParse("2"), + "gpu.intel.com/tiles": resource.MustParse("2"), }, }, }, nil).Once() mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() mockCache.On("AdjustPodResourcesL", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - noTilesInUse := nodeTiles{card0: []int{}} - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Once() + noTilesInUse := nodeTiles{card0: []int{}, "card1": []int{}} + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Twice() result := gas.bindNode(ctx, &args) - So(result.Error, ShouldEqual, "will not fit") + So(result.Error, ShouldEqual, "") }) - } - Convey("When node has a tile descheduled label but another card to use", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpecWithTile(1), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": "card0.card1", - "gpu.intel.com/tiles": "2", - tasNSPrefix + policy + tileDeschedLabelPrefix + "card1_gt0": trueValueString, - pciGroupLabel: "0", - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("2"), - "gpu.intel.com/tiles": resource.MustParse("2"), - }, - Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("2"), - "gpu.intel.com/tiles": resource.MustParse("2"), - }, - }, - }, nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - noTilesInUse := nodeTiles{card0: []int{}, "card1": []int{}} - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Twice() - - result := gas.bindNode(ctx, &args) - So(result.Error, ShouldEqual, "") - }) - - Convey("When node has a preferred card label and fits", t, func() { - applied := false - applyCheck := func(action k8stesting.Action) (bool, runtime.Object, error) { - patchAction, _ := action.(k8stesting.PatchAction) - requiredStr := "card1" - patch := patchAction.GetPatch() - patchStr := string(patch) + Convey("When node has a preferred card label and fits", t, func() { + applied := false + applyCheck := func(action k8stesting.Action) (bool, runtime.Object, error) { + patchAction, _ := action.(k8stesting.PatchAction) + requiredStr := "card1" + patch := patchAction.GetPatch() + patchStr := string(patch) - if !strings.Contains(patchStr, requiredStr) { - return true, nil, errNotFound - } + if !strings.Contains(patchStr, requiredStr) { + return true, nil, errNotFound + } - applied = true + applied = true - return true, nil, nil - } + return true, nil, nil + } - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, 
args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpec(), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": "card0.card1", - tasNSPrefix + policy + "gas-prefer-gpu": "card1", - pciGroupLabel: "0_1", + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpec(pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/cards": "card0.card1", + tasNSPrefix + policy + "gas-prefer-gpu": "card1", + pciGroupLabel: "0_1", + }, }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("2")}, - Allocatable: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("2")}, - }, - }, nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - noTilesInUse := nodeTiles{card0: []int{}, "card1": []int{}} - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Once() + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{v1.ResourceName(pluginResourceName): resource.MustParse("2")}, + Allocatable: v1.ResourceList{v1.ResourceName(pluginResourceName): resource.MustParse("2")}, + }, + }, nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + noTilesInUse := nodeTiles{card0: []int{}, "card1": []int{}} + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Once() - clientset.Fake.PrependReactor("patch", "pods", applyCheck) - result := gas.bindNode(ctx, &args) - clientset.Fake.ReactionChain = clientset.Fake.ReactionChain[1:] + clientset.Fake.PrependReactor("patch", "pods", applyCheck) + result := gas.bindNode(ctx, &args) + clientset.Fake.ReactionChain = clientset.Fake.ReactionChain[1:] - So(result.Error, ShouldEqual, "") - So(applied, ShouldEqual, true) - }) + So(result.Error, ShouldEqual, "") + So(applied, ShouldEqual, true) + }) - Convey("When node has a tile preferred-label", t, func() { - applied := false - applyCheck := func(action k8stesting.Action) (bool, runtime.Object, error) { - patchAction, _ := action.(k8stesting.PatchAction) - requiredStr := "card0:gt3" - patch := patchAction.GetPatch() - patchStr := string(patch) + Convey("When node has a tile preferred-label", t, func() { + applied := false + applyCheck := func(action k8stesting.Action) (bool, runtime.Object, error) { + patchAction, _ := action.(k8stesting.PatchAction) + requiredStr := "card0:gt3" + patch := patchAction.GetPatch() + patchStr := string(patch) - if !strings.Contains(patchStr, requiredStr) { - return true, nil, errNotFound - } + if !strings.Contains(patchStr, requiredStr) { + return true, nil, errNotFound + } - applied = true + applied = true - return true, nil, nil - } + return true, nil, nil + } - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpecWithTile(1), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - 
ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": card0, - "gpu.intel.com/tiles": "4", - tasNSPrefix + policy + tileDisableLabelPrefix + card0Gt0: trueValueString, - tasNSPrefix + policy + tilePrefLabelPrefix + card0: "gt3", - pciGroupLabel: "0", - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("4"), + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpecWithTile(1, pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/cards": card0, + "gpu.intel.com/tiles": "4", + tasNSPrefix + policy + tileDisableLabelPrefix + card0Gt0: trueValueString, + tasNSPrefix + policy + tilePrefLabelPrefix + card0: "gt3", + pciGroupLabel: "0", + }, }, - Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("4"), + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("4"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("4"), + }, }, - }, - }, nil).Once() - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + }, nil).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(nodeResources{}, nil).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - noTilesInUse := nodeTiles{card0: []int{}} - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Twice() + noTilesInUse := nodeTiles{card0: []int{}} + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(noTilesInUse).Twice() - clientset.Fake.PrependReactor("patch", "pods", applyCheck) - result := gas.bindNode(ctx, &args) - clientset.Fake.ReactionChain = clientset.Fake.ReactionChain[1:] + clientset.Fake.PrependReactor("patch", "pods", applyCheck) + result := gas.bindNode(ctx, &args) + clientset.Fake.ReactionChain = clientset.Fake.ReactionChain[1:] - So(result.Error, ShouldEqual, "") - So(applied, ShouldEqual, true) - }) + So(result.Error, ShouldEqual, "") + So(applied, ShouldEqual, true) + }) - iCache = origCacheAPI + iCache = origCacheAPI + } } func TestTileSanitation(t *testing.T) { - pod := getFakePod() - pod.Spec = *getMockPodSpecWithTile(1) - - clientset := fake.NewSimpleClientset(pod) - gas := NewGASExtender(clientset, false, false, "") - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := extenderv1.ExtenderBindingArgs{} - args.Node = nodename - - Convey("When node has an invalid tile disabled and pod should still fit", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpecWithTile(1), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": card0, - 
"gpu.intel.com/tiles": "1", - tasNSPrefix + policy + tileDisableLabelPrefix + "card0_gt6": trueValueString, - pciGroupLabel: "0", - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("1"), + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + pod.Spec = *getMockPodSpecWithTile(1, pluginResourceName) + + clientset := fake.NewSimpleClientset(pod) + gas := NewGASExtender(clientset, false, false, "") + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + + Convey("When node has an invalid tile disabled and pod should still fit", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpecWithTile(1, pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/cards": card0, + "gpu.intel.com/tiles": "1", + tasNSPrefix + policy + tileDisableLabelPrefix + "card0_gt6": trueValueString, + pciGroupLabel: "0", + }, }, - Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("1"), - "gpu.intel.com/tiles": resource.MustParse("1"), + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("1"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("1"), + "gpu.intel.com/tiles": resource.MustParse("1"), + }, }, - }, - }, nil).Once() + }, nil).Once() - usedResources := nodeResources{card0: resourceMap{"gpu.intel.com/i915": 0, "gpu.intel.com/tiles": 0}} + usedResources := nodeResources{card0: resourceMap{pluginResourceName: 0, "gpu.intel.com/tiles": 0}} - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Once() - nodeNames := []string{nodename} - args := extenderv1.ExtenderArgs{} - args.NodeNames = &nodeNames - args.Pod = pod + nodeNames := []string{nodename} + args := extenderv1.ExtenderArgs{} + args.NodeNames = &nodeNames + args.Pod = pod - result := gas.filterNodes(&args) - So(result.Error, ShouldEqual, "") - _, ok := result.FailedNodes[nodename] - So(ok, ShouldEqual, false) - }) + result := gas.filterNodes(&args) + So(result.Error, ShouldEqual, "") + _, ok := result.FailedNodes[nodename] + So(ok, ShouldEqual, false) + }) - iCache = origCacheAPI + iCache = origCacheAPI + } } func TestFilterWithDisabledTiles(t *testing.T) { - pod := getFakePod() - pod.Spec = *getMockPodSpecWithTile(1) - - clientset := fake.NewSimpleClientset(pod) - gas := NewGASExtender(clientset, false, false, "") - mockCache := MockCacheAPI{} - origCacheAPI := iCache - iCache = &mockCache - args := 
extenderv1.ExtenderBindingArgs{} - args.Node = nodename - - Convey("When node has two cards and one disabled tile, pod should still fit", t, func() { - mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ - Spec: *getMockPodSpecWithTile(1), - }, nil).Once() - mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "gpu.intel.com/cards": "card0.card1", - "gpu.intel.com/tiles": "2", - tasNSPrefix + policy + tileDisableLabelPrefix + "card1_gt0": trueValueString, - }, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("2"), - "gpu.intel.com/tiles": resource.MustParse("2"), + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := getFakePod(pluginResourceName) + pod.Spec = *getMockPodSpecWithTile(1, pluginResourceName) + + clientset := fake.NewSimpleClientset(pod) + gas := NewGASExtender(clientset, false, false, "") + mockCache := MockCacheAPI{} + origCacheAPI := iCache + iCache = &mockCache + args := extenderv1.ExtenderBindingArgs{} + args.Node = nodename + + Convey("When node has two cards and one disabled tile, pod should still fit", t, func() { + mockCache.On("FetchPod", mock.Anything, args.PodNamespace, args.PodName).Return(&v1.Pod{ + Spec: *getMockPodSpecWithTile(1, pluginResourceName), + }, nil).Once() + mockCache.On("FetchNode", mock.Anything, args.Node).Return(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gpu.intel.com/cards": "card0.card1", + "gpu.intel.com/tiles": "2", + tasNSPrefix + policy + tileDisableLabelPrefix + "card1_gt0": trueValueString, + }, }, - Allocatable: v1.ResourceList{ - "gpu.intel.com/i915": resource.MustParse("2"), - "gpu.intel.com/tiles": resource.MustParse("2"), + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("2"), + "gpu.intel.com/tiles": resource.MustParse("2"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceName(pluginResourceName): resource.MustParse("2"), + "gpu.intel.com/tiles": resource.MustParse("2"), + }, }, - }, - }, nil).Once() + }, nil).Once() - usedResources := nodeResources{card0: resourceMap{"gpu.intel.com/i915": 0, "gpu.intel.com/tiles": 0}} + usedResources := nodeResources{card0: resourceMap{pluginResourceName: 0, "gpu.intel.com/tiles": 0}} - mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() - mockCache.On("AdjustPodResourcesL", - mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Twice() - nodeNames := []string{nodename} - args := extenderv1.ExtenderArgs{} - args.NodeNames = &nodeNames - args.Pod = pod + mockCache.On("GetNodeResourceStatus", mock.Anything, mock.Anything).Return(usedResources).Once() + mockCache.On("AdjustPodResourcesL", + mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + mockCache.On("GetNodeTileStatus", mock.Anything, mock.Anything).Return(nodeTiles{}).Twice() + nodeNames := []string{nodename} + args := extenderv1.ExtenderArgs{} + args.NodeNames = &nodeNames + args.Pod = pod - result := gas.filterNodes(&args) - So(result.Error, ShouldEqual, "") - _, ok := result.FailedNodes[nodename] - So(ok, ShouldEqual, false) - }) + result := gas.filterNodes(&args) + So(result.Error, ShouldEqual, 
"") + _, ok := result.FailedNodes[nodename] + So(ok, ShouldEqual, false) + }) - iCache = origCacheAPI + iCache = origCacheAPI + } } func TestSanitizeSamegpulist(t *testing.T) { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{}, - }, - Spec: *getMockPodSpecMultiContSamegpu(), - } - - wrongValueReason := map[string]string{ - "container1,container5": "Listing absent containers makes gas-same-gpu ignored", - "container1": "Listing single container name makes gas-same-gpu ignored", - "": "Empty gas-same-gpu annotation is ignored", - "container1,container2,container2": "gas-same-gpu with duplicates is ignored", - } - - Convey("Ensure no gas-same-gpu annotation returns blank list with no error", - t, func() { - containerNames, err := containersRequestingSamegpu(pod) - So(len(containerNames), ShouldEqual, 0) - So(err, ShouldEqual, nil) - }) + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + Spec: *getMockPodSpecMultiContSamegpu(pluginResourceName), + } - for wrongValue, reason := range wrongValueReason { - pod.ObjectMeta.Annotations["gas-same-gpu"] = wrongValue + wrongValueReason := map[string]string{ + "container1,container5": "Listing absent containers makes gas-same-gpu ignored", + "container1": "Listing single container name makes gas-same-gpu ignored", + "": "Empty gas-same-gpu annotation is ignored", + "container1,container2,container2": "gas-same-gpu with duplicates is ignored", + } - Convey(reason, + Convey("Ensure no gas-same-gpu annotation returns blank list with no error", t, func() { containerNames, err := containersRequestingSamegpu(pod) So(len(containerNames), ShouldEqual, 0) - So("malformed annotation", ShouldEqual, err.Error()) + So(err, ShouldEqual, nil) }) - } - pod.ObjectMeta.Annotations["gas-same-gpu"] = "container2,container3" + for wrongValue, reason := range wrongValueReason { + pod.ObjectMeta.Annotations["gas-same-gpu"] = wrongValue - Convey("Ensure correct annotation returns all listed container names with no error", - t, func() { - containerNames, err := containersRequestingSamegpu(pod) - So(containerNames, ShouldResemble, map[string]bool{"container2": true, "container3": true}) - So(err, ShouldEqual, nil) - }) + Convey(reason, + t, func() { + containerNames, err := containersRequestingSamegpu(pod) + So(len(containerNames), ShouldEqual, 0) + So("malformed annotation", ShouldEqual, err.Error()) + }) + } + + pod.ObjectMeta.Annotations["gas-same-gpu"] = "container2,container3" + + Convey("Ensure correct annotation returns all listed container names with no error", + t, func() { + containerNames, err := containersRequestingSamegpu(pod) + So(containerNames, ShouldResemble, map[string]bool{"container2": true, "container3": true}) + So(err, ShouldEqual, nil) + }) + } } func TestSanitizeSamegpuResourcesRequest(t *testing.T) { - Convey("Tiles and monitoring resources are not allowed in same-gpu resourceRequests", - t, func() { - // fail because of tiles - samegpuIndexes := map[int]bool{0: true} - resourceRequests := []resourceMap{ - {"gpu.intel.com/i915": 1, "gpu.intel.com/tiles": 2}, - } - err := sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) - So(err.Error(), ShouldEqual, "resources conflict") - - // fail because of monitoring - samegpuIndexes = map[int]bool{0: true} - resourceRequests = []resourceMap{ - {"gpu.intel.com/i915": 1, "gpu.intel.com/i915_monitoring": 1}, - } - err = 
sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) - So(err.Error(), ShouldEqual, "resources conflict") - - // success - samegpuIndexes = map[int]bool{0: true} - resourceRequests = []resourceMap{ - { - "gpu.intel.com/i915": 1, - "gpu.intel.com/millicores": 100, - "gpu.intel.com/memory.max": 8589934592, - }, - } - err = sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) - So(err, ShouldEqual, nil) - }) + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + for _, monitoringResourceName := range []string{xeMonitoringResource, i915MonitoringResource} { + Convey("Tiles and monitoring resources are not allowed in same-gpu resourceRequests", + t, func() { + // fail because of tiles + samegpuIndexes := map[int]bool{0: true} + resourceRequests := []resourceMap{ + {pluginResourceName: 1, "gpu.intel.com/tiles": 2}, + } + err := sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) + So(err.Error(), ShouldEqual, "resources conflict") - Convey("Same-gpu containers should have exactly one device resource requested", - t, func() { - // failure heterogeneous - samegpuIndexes := map[int]bool{0: true, 1: true} - resourceRequests := []resourceMap{ - {"gpu.intel.com/i915": 1, "gpu.intel.com/millicores": 200}, - {"gpu.intel.com/i915": 2, "gpu.intel.com/millicores": 200}, - } - err := sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) - So(err.Error(), ShouldEqual, "resources conflict") - - // Failure homogeneous - samegpuIndexes = map[int]bool{0: true, 1: true} - resourceRequests = []resourceMap{ - {"gpu.intel.com/i915": 2, "gpu.intel.com/millicores": 200}, - {"gpu.intel.com/i915": 2, "gpu.intel.com/millicores": 200}, - } - err = sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) - So(err.Error(), ShouldEqual, "resources conflict") - - // Success - samegpuIndexes = map[int]bool{0: true, 1: true} - resourceRequests = []resourceMap{ - {"gpu.intel.com/i915": 1, "gpu.intel.com/millicores": 200}, - {"gpu.intel.com/i915": 1, "gpu.intel.com/millicores": 300}, - } - err = sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) - So(err, ShouldEqual, nil) - }) + // fail because of monitoring + samegpuIndexes = map[int]bool{0: true} + resourceRequests = []resourceMap{ + {pluginResourceName: 1, monitoringResourceName: 1}, + } + err = sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) + So(err.Error(), ShouldEqual, "resources conflict") + + // success + samegpuIndexes = map[int]bool{0: true} + resourceRequests = []resourceMap{ + { + pluginResourceName: 1, + "gpu.intel.com/millicores": 100, + "gpu.intel.com/memory.max": 8589934592, + }, + } + err = sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) + So(err, ShouldEqual, nil) + }) + + Convey("Same-gpu containers should have exactly one device resource requested", + t, func() { + // failure heterogeneous + samegpuIndexes := map[int]bool{0: true, 1: true} + resourceRequests := []resourceMap{ + {pluginResourceName: 1, "gpu.intel.com/millicores": 200}, + {pluginResourceName: 2, "gpu.intel.com/millicores": 200}, + } + err := sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) + So(err.Error(), ShouldEqual, "resources conflict") + + // Failure homogeneous + samegpuIndexes = map[int]bool{0: true, 1: true} + resourceRequests = []resourceMap{ + {pluginResourceName: 2, "gpu.intel.com/millicores": 200}, + {pluginResourceName: 2, "gpu.intel.com/millicores": 200}, + } + err = sanitizeSamegpuResourcesRequest(samegpuIndexes, 
resourceRequests) + So(err.Error(), ShouldEqual, "resources conflict") + + // Success + samegpuIndexes = map[int]bool{0: true, 1: true} + resourceRequests = []resourceMap{ + {pluginResourceName: 1, "gpu.intel.com/millicores": 200}, + {pluginResourceName: 1, "gpu.intel.com/millicores": 300}, + } + err = sanitizeSamegpuResourcesRequest(samegpuIndexes, resourceRequests) + So(err, ShouldEqual, nil) + }) + } + } } diff --git a/gpu-aware-scheduling/pkg/gpuscheduler/utils_test.go b/gpu-aware-scheduling/pkg/gpuscheduler/utils_test.go index 1cd74db..693761a 100644 --- a/gpu-aware-scheduling/pkg/gpuscheduler/utils_test.go +++ b/gpu-aware-scheduling/pkg/gpuscheduler/utils_test.go @@ -118,46 +118,48 @@ func TestGPUNameToLZeroDeviceId(t *testing.T) { } func TestPCIGroups(t *testing.T) { - defaultGroups := "0.1_2.3.4" - - Convey("When the GPU belongs to a PCI Group", t, func() { - node := getMockNode(1, 1) - node.Labels[pciGroupLabel] = defaultGroups - So(getPCIGroup(node, "card0"), ShouldResemble, []string{"0", "1"}) - So(getPCIGroup(node, "card1"), ShouldResemble, []string{"0", "1"}) - So(getPCIGroup(node, "card2"), ShouldResemble, []string{"2", "3", "4"}) - So(getPCIGroup(node, "card3"), ShouldResemble, []string{"2", "3", "4"}) - So(getPCIGroup(node, "card4"), ShouldResemble, []string{"2", "3", "4"}) - }) + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + defaultGroups := "0.1_2.3.4" + + Convey("When the GPU belongs to a PCI Group", t, func() { + node := getMockNode(1, 1, pluginResourceName) + node.Labels[pciGroupLabel] = defaultGroups + So(getPCIGroup(node, "card0"), ShouldResemble, []string{"0", "1"}) + So(getPCIGroup(node, "card1"), ShouldResemble, []string{"0", "1"}) + So(getPCIGroup(node, "card2"), ShouldResemble, []string{"2", "3", "4"}) + So(getPCIGroup(node, "card3"), ShouldResemble, []string{"2", "3", "4"}) + So(getPCIGroup(node, "card4"), ShouldResemble, []string{"2", "3", "4"}) + }) - Convey("When the GPU belongs to a PCI Group with multiple group labels", t, func() { - node := getMockNode(1, 1) - node.Labels[pciGroupLabel] = defaultGroups - node.Labels[pciGroupLabel+"2"] = "Z_5.6_7.8_11.12" - node.Labels[pciGroupLabel+"3"] = "Z_9.10" - So(getPCIGroup(node, "card6"), ShouldResemble, []string{"5", "6"}) - So(getPCIGroup(node, "card9"), ShouldResemble, []string{"9", "10"}) - So(getPCIGroup(node, "card20"), ShouldResemble, []string{}) - }) + Convey("When the GPU belongs to a PCI Group with multiple group labels", t, func() { + node := getMockNode(1, 1, pluginResourceName) + node.Labels[pciGroupLabel] = defaultGroups + node.Labels[pciGroupLabel+"2"] = "Z_5.6_7.8_11.12" + node.Labels[pciGroupLabel+"3"] = "Z_9.10" + So(getPCIGroup(node, "card6"), ShouldResemble, []string{"5", "6"}) + So(getPCIGroup(node, "card9"), ShouldResemble, []string{"9", "10"}) + So(getPCIGroup(node, "card20"), ShouldResemble, []string{}) + }) - Convey("When I call addPCIGroupGPUs with a proper node and cards map", t, func() { - node := getMockNode(1, 1) - node.Labels[pciGroupLabel] = defaultGroups - cards := []string{} - cards = addPCIGroupGPUs(node, "card3", cards) + Convey("When I call addPCIGroupGPUs with a proper node and cards map", t, func() { + node := getMockNode(1, 1, pluginResourceName) + node.Labels[pciGroupLabel] = defaultGroups + cards := []string{} + cards = addPCIGroupGPUs(node, "card3", cards) - So(len(cards), ShouldEqual, 3) - So(cards, ShouldContain, "card2") - So(cards, ShouldContain, "card3") - So(cards, ShouldContain, "card4") + So(len(cards), ShouldEqual, 3) + 
So(cards, ShouldContain, "card2") + So(cards, ShouldContain, "card3") + So(cards, ShouldContain, "card4") - cards2 := []string{} - cards2 = addPCIGroupGPUs(node, "card0", cards2) + cards2 := []string{} + cards2 = addPCIGroupGPUs(node, "card0", cards2) - So(len(cards2), ShouldEqual, 2) - So(cards2, ShouldContain, "card0") - So(cards2, ShouldContain, "card1") - }) + So(len(cards2), ShouldEqual, 2) + So(cards2, ShouldContain, "card0") + So(cards2, ShouldContain, "card1") + }) + } } func TestTASNamespaceStrip(t *testing.T) { @@ -325,37 +327,41 @@ func TestSanitizeTiles(t *testing.T) { } func TestConcatenateSplitLabel(t *testing.T) { - Convey("When the label is split, it can be concatenated", t, func() { - node := getMockNode(1, 1) - node.Labels[pciGroupLabel] = "foo" - node.Labels[pciGroupLabel+"2"] = "Zbar" - node.Labels[pciGroupLabel+"3"] = "Zber" - result := concatenateSplitLabel(node, pciGroupLabel) - So(result, ShouldEqual, "foobarber") - }) + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + Convey("When the label is split, it can be concatenated", t, func() { + node := getMockNode(1, 1, pluginResourceName) + node.Labels[pciGroupLabel] = "foo" + node.Labels[pciGroupLabel+"2"] = "Zbar" + node.Labels[pciGroupLabel+"3"] = "Zber" + result := concatenateSplitLabel(node, pciGroupLabel) + So(result, ShouldEqual, "foobarber") + }) + } } func TestContainerRequestsNoSamegpu(t *testing.T) { - Convey( - "With empty same-gpu list, empty map and a full list of resource requests is expected", - t, func() { - pod := &v1.Pod{ - Spec: *getMockPodSpecMultiContSamegpu(), - } - samegpuSearchmap, allResourceRequests := containerRequests(pod, map[string]bool{}) - So(len(samegpuSearchmap), ShouldEqual, 0) - So(len(allResourceRequests), ShouldEqual, len(pod.Spec.Containers)) - }) - Convey( - "With same-gpu list, map of respective indexes should be returned and full list of resource requests", - t, func() { - pod := &v1.Pod{ - Spec: *getMockPodSpecMultiContSamegpu(), - } - samegpuNames := map[string]bool{"container2": true, "container3": true} - samegpuSearchmap, allRequests := containerRequests(pod, samegpuNames) - So(len(samegpuSearchmap), ShouldEqual, len(samegpuNames)) - So(len(allRequests), ShouldEqual, len(pod.Spec.Containers)) - So(samegpuSearchmap, ShouldResemble, map[int]bool{1: true, 2: true}) - }) + for _, pluginResourceName := range []string{i915PluginResource, xePluginResource} { + Convey( + "With empty same-gpu list, empty map and a full list of resource requests is expected", + t, func() { + pod := &v1.Pod{ + Spec: *getMockPodSpecMultiContSamegpu(pluginResourceName), + } + samegpuSearchmap, allResourceRequests := containerRequests(pod, map[string]bool{}) + So(len(samegpuSearchmap), ShouldEqual, 0) + So(len(allResourceRequests), ShouldEqual, len(pod.Spec.Containers)) + }) + Convey( + "With same-gpu list, map of respective indexes should be returned and full list of resource requests", + t, func() { + pod := &v1.Pod{ + Spec: *getMockPodSpecMultiContSamegpu(pluginResourceName), + } + samegpuNames := map[string]bool{"container2": true, "container3": true} + samegpuSearchmap, allRequests := containerRequests(pod, samegpuNames) + So(len(samegpuSearchmap), ShouldEqual, len(samegpuNames)) + So(len(allRequests), ShouldEqual, len(pod.Spec.Containers)) + So(samegpuSearchmap, ShouldResemble, map[int]bool{1: true, 2: true}) + }) + } } From 7d1e4ce3caed76aaf14fe43fb94737a48e1f4a03 Mon Sep 17 00:00:00 2001 From: Ukri Niemimuukko Date: Mon, 18 Mar 2024 10:36:34 +0200 Subject: 
[PATCH 3/3] Version update --- gpu-aware-scheduling/go.mod | 61 ++++++++-------- gpu-aware-scheduling/go.sum | 139 ++++++++++++++++++++++++------------ 2 files changed, 125 insertions(+), 75 deletions(-) diff --git a/gpu-aware-scheduling/go.mod b/gpu-aware-scheduling/go.mod index 7c0bd0d..5c64114 100644 --- a/gpu-aware-scheduling/go.mod +++ b/gpu-aware-scheduling/go.mod @@ -3,33 +3,33 @@ module github.com/intel/platform-aware-scheduling/gpu-aware-scheduling go 1.21 require ( - github.com/intel/platform-aware-scheduling/extender v0.6.0 + github.com/intel/platform-aware-scheduling/extender v0.7.0 github.com/pkg/errors v0.9.1 github.com/smartystreets/goconvey v1.8.1 - github.com/stretchr/testify v1.8.4 - k8s.io/api v0.28.4 - k8s.io/apimachinery v0.28.4 - k8s.io/client-go v0.28.4 - k8s.io/klog/v2 v2.110.1 - k8s.io/kube-scheduler v0.28.4 + github.com/stretchr/testify v1.9.0 + k8s.io/api v0.29.3 + k8s.io/apimachinery v0.29.3 + k8s.io/client-go v0.29.3 + k8s.io/klog/v2 v2.120.1 + k8s.io/kube-scheduler v0.29.3 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/go-logr/logr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect - github.com/imdario/mergo v0.3.15 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect @@ -40,22 +40,21 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/smarty/assertions v1.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.11.1 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.31.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // 
indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/gpu-aware-scheduling/go.sum b/gpu-aware-scheduling/go.sum index f018e74..f65fd2d 100644 --- a/gpu-aware-scheduling/go.sum +++ b/gpu-aware-scheduling/go.sum @@ -2,33 +2,45 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= 
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -36,12 +48,16 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/imdario/mergo v0.3.15 
h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/intel/platform-aware-scheduling/extender v0.6.0 h1:0bd1qQxIZ6omW/LjVJTRXAY0okUIMCyY4TC06bpBizw= -github.com/intel/platform-aware-scheduling/extender v0.6.0/go.mod h1:TJox4q4p2veXqkIL4zvVPq0Jot1DT/VfhBD24X1ObBQ= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/intel/platform-aware-scheduling/extender v0.7.0 h1:tcAhrULa7usSHQkzJI8UScKXvkZe5iRy1lZLMuhM4d8= +github.com/intel/platform-aware-scheduling/extender v0.7.0/go.mod h1:7dCus4LZba+H4fPQ6rlAZD5YDeu7jR4VGzGe0GQPBGQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -66,10 +82,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -84,63 +100,92 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify 
v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.11.1 h1:ojD5zOW8+7dOGzdnNgersm8aPfcDjhMp12UfG93NIMc= -golang.org/x/tools v0.11.1/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -152,23 +197,29 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= -k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= -k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= -k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= -k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= -k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kube-scheduler v0.28.4 h1:QdUvqNn4z9JbgLIwemj9zeGW5kJUtW+WDd8rev5HBDA= -k8s.io/kube-scheduler v0.28.4/go.mod h1:pHz0xQOjwDc+VpHhCE2KM1fER3ldm0vABnq0myBHsoI= -k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= -k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= 
+k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-scheduler v0.29.3 h1:2JKNfkReDHS33Ase2aejcvH6qHLi3KPmS2ejaHXE4YQ= +k8s.io/kube-scheduler v0.29.3/go.mod h1:1NLHViSwFFddWHH4U9UGD57clINAtje/PEs6PjOYQZg= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
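
For reviewers, a minimal sketch (not part of the patches above) of how a workload container would request a GPU through either device plugin resource name; the tests above loop over both names and expect identical scheduling behavior. The package name and the buildGPUContainer helper are illustrative only, assuming the standard k8s.io/api and k8s.io/apimachinery types.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// buildGPUContainer returns a container spec that requests one GPU via the
// given device plugin resource name, e.g. "gpu.intel.com/i915" or
// "gpu.intel.com/xe". For extended resources the limit also acts as the
// request, so setting Limits alone is enough here.
func buildGPUContainer(name, pluginResourceName string) v1.Container {
	return v1.Container{
		Name: name,
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceName(pluginResourceName): resource.MustParse("1"),
			},
		},
	}
}

Usage would be the same for both resource names, e.g. buildGPUContainer("gpu-workload", "gpu.intel.com/xe") in a pod spec, mirroring how the tests parameterize over the i915 and xe plugin resources.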