Merge pull request #4441 from marwanad/fix-pod-equivalence-perf
fix pod equivalency checks for pods with projected volumes
k8s-ci-robot authored Dec 24, 2021
2 parents fca1dc0 + 286f44e commit 6d19e3d
Showing 7 changed files with 289 additions and 8 deletions.
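
For context, a minimal sketch (not part of the commit) of the failure mode this PR fixes: two replicas of the same controller receive distinct generated names for their projected service-account-token volumes (kube-api-access-*), so a strict DeepEqual on their specs reports them as different and every pod lands in its own equivalence group. The helper and volume names below are illustrative.

package main

import (
	"fmt"
	"reflect"

	apiv1 "k8s.io/api/core/v1"
)

// specWithToken builds a pod spec whose only volume is a projected
// service-account token; volName stands in for the generated name.
func specWithToken(volName string) apiv1.PodSpec {
	return apiv1.PodSpec{
		Volumes: []apiv1.Volume{{
			Name: volName,
			VolumeSource: apiv1.VolumeSource{
				Projected: &apiv1.ProjectedVolumeSource{
					Sources: []apiv1.VolumeProjection{{
						ServiceAccountToken: &apiv1.ServiceAccountTokenProjection{Path: "token"},
					}},
				},
			},
		}},
	}
}

func main() {
	a := specWithToken("kube-api-access-nz94b")
	b := specWithToken("kube-api-access-mo25i")
	// The two specs schedule identically, but a strict comparison disagrees,
	// which is what broke equivalence grouping before this change.
	fmt.Println(reflect.DeepEqual(a, b)) // false
}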
4 changes: 2 additions & 2 deletions cluster-autoscaler/core/equivalence_groups.go
@@ -17,10 +17,10 @@ limitations under the License.
package core

import (
"k8s.io/autoscaler/cluster-autoscaler/utils"
"reflect"

apiv1 "k8s.io/api/core/v1"
- apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/types"
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
@@ -67,7 +67,7 @@ func groupPodsBySchedulingProperties(pods []*apiv1.Pod) map[equivalenceGroupId][

matchingFound := false
for _, g := range equivalenceGroupsByController[controllerRef.UID] {
- if reflect.DeepEqual(pod.Labels, g.representant.Labels) && apiequality.Semantic.DeepEqual(pod.Spec, g.representant.Spec) {
+ if reflect.DeepEqual(pod.Labels, g.representant.Labels) && utils.PodSpecSemanticallyEqual(pod.Spec, g.representant.Spec) {
matchingFound = true
podEquivalenceGroups[g.id] = append(podEquivalenceGroups[g.id], pod)
break
40 changes: 38 additions & 2 deletions cluster-autoscaler/core/equivalence_groups_test.go
@@ -47,6 +47,25 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
},
}

rc3 := apiv1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "rc3",
Namespace: "default",
SelfLink: "api/v1/namespaces/default/replicationcontrollers/rc3",
UID: "12345678-1234-1234-1234-12345678901e",
},
}

rc4 := apiv1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "rc4",
Namespace: "default",
SelfLink: "api/v1/namespaces/default/replicationcontrollers/rc4",
UID: "12345678-1234-1234-1234-12345678901f",
},
}

projectedSAVol := BuildServiceTokenProjectedVolumeSource("path")
p1 := BuildTestPod("p1", 1500, 200000)
p2_1 := BuildTestPod("p2_1", 3000, 200000)
p2_1.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
@@ -56,10 +75,24 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
p3_1.OwnerReferences = GenerateOwnerReferences(rc2.Name, "ReplicationController", "extensions/v1beta1", rc2.UID)
p3_2 := BuildTestPod("p3_2", 100, 200000)
p3_2.OwnerReferences = GenerateOwnerReferences(rc2.Name, "ReplicationController", "extensions/v1beta1", rc2.UID)
- unschedulablePods := []*apiv1.Pod{p1, p2_1, p2_2, p3_1, p3_2}
// Two pods with projected volume sources should be in the same equivalence group
p4_1 := BuildTestPod("p4_1", 100, 200000)
p4_1.OwnerReferences = GenerateOwnerReferences(rc3.Name, "ReplicationController", "extensions/v1beta1", rc3.UID)
p4_1.Spec.Volumes = []apiv1.Volume{{Name: "kube-api-access-nz94b", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}}}
p4_2 := BuildTestPod("p4_2", 100, 200000)
p4_2.OwnerReferences = GenerateOwnerReferences(rc3.Name, "ReplicationController", "extensions/v1beta1", rc3.UID)
p4_2.Spec.Volumes = []apiv1.Volume{{Name: "kube-api-access-mo25i", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}}}
// Two pods with flex volume sources should be in different equivalence groups
p5_1 := BuildTestPod("p5_1", 100, 200000)
p5_1.Spec.Volumes = []apiv1.Volume{{Name: "volume-nz94b", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}}}
p5_1.OwnerReferences = GenerateOwnerReferences(rc4.Name, "ReplicationController", "extensions/v1beta1", rc4.UID)
p5_2 := BuildTestPod("p5_2", 100, 200000)
p5_2.Spec.Volumes = []apiv1.Volume{{Name: "volume-mo25i", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}}}
p5_2.OwnerReferences = GenerateOwnerReferences(rc4.Name, "ReplicationController", "extensions/v1beta1", rc4.UID)
+ unschedulablePods := []*apiv1.Pod{p1, p2_1, p2_2, p3_1, p3_2, p4_1, p4_2, p5_1, p5_2}

podGroups := groupPodsBySchedulingProperties(unschedulablePods)
- assert.Equal(t, 3, len(podGroups))
+ assert.Equal(t, 6, len(podGroups))

wantedGroups := []struct {
pods []*apiv1.Pod
@@ -68,6 +101,9 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
{pods: []*apiv1.Pod{p1}},
{pods: []*apiv1.Pod{p2_1, p2_2}},
{pods: []*apiv1.Pod{p3_1, p3_2}},
{pods: []*apiv1.Pod{p4_1, p4_2}},
{pods: []*apiv1.Pod{p5_1}},
{pods: []*apiv1.Pod{p5_2}},
}

equal := func(a, b []*apiv1.Pod) bool {
4 changes: 2 additions & 2 deletions cluster-autoscaler/core/utils/pod_schedulable.go
@@ -17,10 +17,10 @@ limitations under the License.
package utils

import (
"k8s.io/autoscaler/cluster-autoscaler/utils"
"reflect"

apiv1 "k8s.io/api/core/v1"
- apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
)
@@ -50,7 +50,7 @@ type PodSchedulableMap map[string][]PodSchedulableInfo

// Match tests if given pod matches PodSchedulableInfo
func (psi *PodSchedulableInfo) Match(pod *apiv1.Pod) bool {
- return reflect.DeepEqual(pod.Labels, psi.labels) && apiequality.Semantic.DeepEqual(pod.Spec, psi.spec)
+ return reflect.DeepEqual(pod.Labels, psi.labels) && utils.PodSpecSemanticallyEqual(pod.Spec, psi.spec)
}

// Get returns scheduling info for given pod if matching one exists in PodSchedulableMap
20 changes: 18 additions & 2 deletions cluster-autoscaler/core/utils/pod_schedulable_test.go
@@ -17,9 +17,9 @@ limitations under the License.
package utils

import (
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"testing"

"k8s.io/autoscaler/cluster-autoscaler/simulator"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"

apiv1 "k8s.io/api/core/v1"
@@ -74,12 +74,28 @@ func TestPodSchedulableMap(t *testing.T) {
assert.Equal(t, cpuErr, err)

// Another replica in rc1
podInRc1_2 := BuildTestPod("podInRc1_1", 500, 1000)
podInRc1_2 := BuildTestPod("podInRc1_2", 500, 1000)
podInRc1_2.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
err, found = pMap.Get(podInRc1_2)
assert.True(t, found)
assert.Nil(t, err)

// A replica in rc1 with a projected volume
podInRc1ProjectedVol := BuildTestPod("podInRc1_ProjectedVol", 500, 1000)
podInRc1ProjectedVol.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
podInRc1ProjectedVol.Spec.Volumes = []apiv1.Volume{{Name: "kube-api-access-nz94b", VolumeSource: apiv1.VolumeSource{Projected: BuildServiceTokenProjectedVolumeSource("path")}}}
err, found = pMap.Get(podInRc1ProjectedVol)
assert.True(t, found)
assert.Nil(t, err)

// A replica in rc1 with a non-projected volume
podInRc1FlexVol := BuildTestPod("podInRc1_FlexVol", 500, 1000)
podInRc1FlexVol.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
podInRc1FlexVol.Spec.Volumes = []apiv1.Volume{{Name: "volume-mo25i", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}}}
err, found = pMap.Get(podInRc1FlexVol)
assert.False(t, found)
assert.Nil(t, err)

// A pod in rc1, but with different requests
differentPodInRc1 := BuildTestPod("differentPodInRc1", 1000, 1000)
differentPodInRc1.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
14 changes: 14 additions & 0 deletions cluster-autoscaler/utils/test/test_utils.go
@@ -65,6 +65,20 @@ func BuildTestPod(name string, cpu int64, mem int64) *apiv1.Pod {
return pod
}

// BuildServiceTokenProjectedVolumeSource returns a ProjectedVolumeSource with SA token
// projection
func BuildServiceTokenProjectedVolumeSource(path string) *apiv1.ProjectedVolumeSource {
return &apiv1.ProjectedVolumeSource{
Sources: []apiv1.VolumeProjection{
{
ServiceAccountToken: &apiv1.ServiceAccountTokenProjection{
Path: path,
},
},
},
}
}

const (
// cannot use constants from gpu module due to cyclic package import
resourceNvidiaGPU = "nvidia.com/gpu"
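A hedged usage sketch for the new helper, mirroring how the tests above wire it into a pod (this would live inside a test in the same package; the pod and volume names are illustrative):

	// Attach a projected service-account-token volume to a test pod.
	vol := BuildServiceTokenProjectedVolumeSource("token")
	pod := BuildTestPod("example", 100, 200000)
	pod.Spec.Volumes = []apiv1.Volume{{
		Name:         "kube-api-access-example", // stands in for a generated name
		VolumeSource: apiv1.VolumeSource{Projected: vol},
	}}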
38 changes: 38 additions & 0 deletions cluster-autoscaler/utils/utils.go
@@ -18,6 +18,7 @@ package utils

import (
apiv1 "k8s.io/api/core/v1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
klog "k8s.io/klog/v2"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
Expand Down Expand Up @@ -54,3 +55,40 @@ func FilterOutNodes(nodes []*apiv1.Node, nodesToFilterOut []*apiv1.Node) []*apiv

return filtered
}

// PodSpecSemanticallyEqual returns true if two pod specs are similar after
// dropping the fields we don't care about.
// Due to the generated name suffixes, a strict DeepEqual check would fail and
// generate an equivalence group per pod, which is undesirable.
// Projected volumes do not impact scheduling, so we should ignore them.
func PodSpecSemanticallyEqual(p1 apiv1.PodSpec, p2 apiv1.PodSpec) bool {
p1Spec := sanitizeProjectedVolumesAndMounts(p1)
p2Spec := sanitizeProjectedVolumesAndMounts(p2)
return apiequality.Semantic.DeepEqual(p1Spec, p2Spec)
}

// sanitizeProjectedVolumesAndMounts returns a pod spec with projected volumes
// and their mounts removed
func sanitizeProjectedVolumesAndMounts(podSpec apiv1.PodSpec) apiv1.PodSpec {
projectedVolumeNames := map[string]bool{}
var volumes []apiv1.Volume
for _, v := range podSpec.Volumes {
if v.Projected == nil {
volumes = append(volumes, v)
} else {
projectedVolumeNames[v.Name] = true
}
}
podSpec.Volumes = volumes

for i := range podSpec.Containers {
var volumeMounts []apiv1.VolumeMount
for _, mount := range podSpec.Containers[i].VolumeMounts {
if ok := projectedVolumeNames[mount.Name]; !ok {
volumeMounts = append(volumeMounts, mount)
}
}
podSpec.Containers[i].VolumeMounts = volumeMounts
}
return podSpec
}
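
A small sketch of the intended behavior of PodSpecSemanticallyEqual (assuming a caller that imports this package as utils and the test helpers as test; images and volume names are illustrative): specs that differ only in projected volumes compare equal, while a real spec difference still breaks equality.

	p1 := apiv1.PodSpec{
		Containers: []apiv1.Container{{Name: "app", Image: "nginx"}},
		Volumes: []apiv1.Volume{{
			Name:         "kube-api-access-aaaaa",
			VolumeSource: apiv1.VolumeSource{Projected: test.BuildServiceTokenProjectedVolumeSource("token")},
		}},
	}
	p2 := apiv1.PodSpec{
		Containers: []apiv1.Container{{Name: "app", Image: "nginx"}},
		Volumes: []apiv1.Volume{{
			Name:         "kube-api-access-bbbbb",
			VolumeSource: apiv1.VolumeSource{Projected: test.BuildServiceTokenProjectedVolumeSource("token")},
		}},
	}
	fmt.Println(utils.PodSpecSemanticallyEqual(p1, p2)) // true: only projected volumes differ
	p2.Containers[0].Image = "nginx:alpine"
	fmt.Println(utils.PodSpecSemanticallyEqual(p1, p2)) // false: a real difference remains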
177 changes: 177 additions & 0 deletions cluster-autoscaler/utils/utils_test.go
@@ -0,0 +1,177 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
"testing"
)

func TestPodSpecSemanticallyEqual(t *testing.T) {
projectedSAVol := test.BuildServiceTokenProjectedVolumeSource("path")

tests := []struct {
name string
p1Spec apiv1.PodSpec
p2Spec apiv1.PodSpec
result bool
}{
{
name: "two pods with projected volume sources",
p1Spec: apiv1.PodSpec{
Volumes: []apiv1.Volume{
{Name: "projected1", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}},
},
},
p2Spec: apiv1.PodSpec{
Volumes: []apiv1.Volume{
{Name: "projected2", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}},
},
},
result: true,
},
{
name: "two pods with different volumes",
p1Spec: apiv1.PodSpec{
Volumes: []apiv1.Volume{
{Name: "vol1", VolumeSource: apiv1.VolumeSource{EmptyDir: &apiv1.EmptyDirVolumeSource{Medium: ""}}},
},
},
p2Spec: apiv1.PodSpec{
Volumes: []apiv1.Volume{
{Name: "vol2", VolumeSource: apiv1.VolumeSource{EmptyDir: &apiv1.EmptyDirVolumeSource{Medium: ""}}},
},
},
result: false,
},
{
name: "two pod different containers",
p1Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar"},
},
},
p2Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{Image: "foo/baz", Name: "foobaz"},
},
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := PodSpecSemanticallyEqual(tt.p1Spec, tt.p2Spec)
assert.Equal(t, tt.result, result)
})
}
}

func TestSanitizeProjectedVolumesAndMounts(t *testing.T) {
projectedSAVol := test.BuildServiceTokenProjectedVolumeSource("path")

tests := []struct {
name string
inputPodSpec apiv1.PodSpec
outputPodSpec apiv1.PodSpec
}{
{
name: "pod spec with only projected volumes",
inputPodSpec: apiv1.PodSpec{
NodeSelector: map[string]string{"foo": "bar"},
Volumes: []apiv1.Volume{
{Name: "projected1", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}},
{Name: "projected2", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}},
},
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar", VolumeMounts: []apiv1.VolumeMount{{Name: "projected1"}}},
{Image: "foo/baz", Name: "foobaz", VolumeMounts: []apiv1.VolumeMount{{Name: "projected2"}}},
},
},
outputPodSpec: apiv1.PodSpec{
NodeSelector: map[string]string{"foo": "bar"},
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar"},
{Image: "foo/baz", Name: "foobaz"},
},
},
},
{
name: "pod spec with only non-projected volumes",
inputPodSpec: apiv1.PodSpec{
NodeSelector: map[string]string{"foo": "bar"},
Volumes: []apiv1.Volume{
{Name: "volume-nz94a", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}},
{Name: "volume-nz94b", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}},
},
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar", VolumeMounts: []apiv1.VolumeMount{{Name: "volume-nz94a"}}},
{Image: "foo/baz", Name: "foo/baz", VolumeMounts: []apiv1.VolumeMount{{Name: "volume-nz94b"}}},
},
},
outputPodSpec: apiv1.PodSpec{
NodeSelector: map[string]string{"foo": "bar"},
Volumes: []apiv1.Volume{
{Name: "volume-nz94a", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}},
{Name: "volume-nz94b", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}},
},
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar", VolumeMounts: []apiv1.VolumeMount{{Name: "volume-nz94a"}}},
{Image: "foo/baz", Name: "foo/baz", VolumeMounts: []apiv1.VolumeMount{{Name: "volume-nz94b"}}},
},
},
},
{
name: "pod spec with a mix of volume types",
inputPodSpec: apiv1.PodSpec{
NodeSelector: map[string]string{"foo": "bar"},
Volumes: []apiv1.Volume{
{Name: "volume-nz94b", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}},
{Name: "kube-api-access-nz94a", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}},
{Name: "projected2", VolumeSource: apiv1.VolumeSource{Projected: projectedSAVol}},
{Name: "empty-dir", VolumeSource: apiv1.VolumeSource{EmptyDir: &apiv1.EmptyDirVolumeSource{Medium: ""}}},
},
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar", VolumeMounts: []apiv1.VolumeMount{{Name: "kube-api-access-nz94a"}}},
{Image: "foo/baz", Name: "foo/baz", VolumeMounts: []apiv1.VolumeMount{{Name: "volume-nz94b"}, {Name: "kube-api-access-nz94a"}, {Name: "empty-dir"}, {Name: "projected2"}}},
{Image: "foo/qux", Name: "foo/qux"},
},
},
outputPodSpec: apiv1.PodSpec{
NodeSelector: map[string]string{"foo": "bar"},
Volumes: []apiv1.Volume{
{Name: "volume-nz94b", VolumeSource: apiv1.VolumeSource{FlexVolume: &apiv1.FlexVolumeSource{Driver: "testDriver"}}},
{Name: "empty-dir", VolumeSource: apiv1.VolumeSource{EmptyDir: &apiv1.EmptyDirVolumeSource{Medium: ""}}},
},
Containers: []apiv1.Container{
{Image: "foo/bar", Name: "foobar"},
{Image: "foo/baz", Name: "foo/baz", VolumeMounts: []apiv1.VolumeMount{{Name: "volume-nz94b"}, {Name: "empty-dir"}}},
{Image: "foo/qux", Name: "foo/qux"},
},
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := sanitizeProjectedVolumesAndMounts(tt.inputPodSpec)
assert.True(t, assert.ObjectsAreEqualValues(tt.outputPodSpec, got), "\ngot: %#v\nwant: %#v", got, tt.outputPodSpec)
})
}
}
