diff --git a/cluster-autoscaler/core/equivalence_groups.go b/cluster-autoscaler/core/equivalence_groups.go
index e509d61ffbee..794227c78165 100644
--- a/cluster-autoscaler/core/equivalence_groups.go
+++ b/cluster-autoscaler/core/equivalence_groups.go
@@ -67,7 +67,7 @@ func groupPodsBySchedulingProperties(pods []*apiv1.Pod) map[equivalenceGroupId][
 
 		matchingFound := false
 		for _, g := range equivalenceGroupsByController[controllerRef.UID] {
-			if reflect.DeepEqual(pod.Labels, g.representant.Labels) && apiequality.Semantic.DeepEqual(pod.Spec, g.representant.Spec) {
+			if reflect.DeepEqual(pod.Labels, g.representant.Labels) && podSpecSemanticallyEqual(pod.Spec, g.representant.Spec) {
 				matchingFound = true
 				podEquivalenceGroups[g.id] = append(podEquivalenceGroups[g.id], pod)
 				break
@@ -87,3 +87,22 @@ func groupPodsBySchedulingProperties(pods []*apiv1.Pod) map[equivalenceGroupId][
 
 	return podEquivalenceGroups
 }
+
+func podSpecSemanticallyEqual(p1 apiv1.PodSpec, p2 apiv1.PodSpec) bool {
+	p1Spec := sanitizeVolumesAndMounts(p1)
+	p2Spec := sanitizeVolumesAndMounts(p2)
+	return apiequality.Semantic.DeepEqual(p1Spec, p2Spec)
+}
+
+// sanitizeVolumesAndMounts removes volumes and volume mounts from the pod spec.
+// Due to their generated name suffixes, a strict DeepEqual check would fail and
+// produce one equivalence group per pod, which is undesirable.
+// This also applies to pods without volumes, because the default service account
+// token is projected onto a generated kube-api-access volume.
+func sanitizeVolumesAndMounts(podSpec apiv1.PodSpec) apiv1.PodSpec {
+	podSpec.Volumes = nil
+	for i := range podSpec.Containers {
+		podSpec.Containers[i].VolumeMounts = nil
+	}
+	return podSpec
+}
diff --git a/cluster-autoscaler/core/equivalence_groups_test.go b/cluster-autoscaler/core/equivalence_groups_test.go
index ff71c832ba4e..9c965564164d 100644
--- a/cluster-autoscaler/core/equivalence_groups_test.go
+++ b/cluster-autoscaler/core/equivalence_groups_test.go
@@ -47,6 +47,15 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
 		},
 	}
 
+	rc3 := apiv1.ReplicationController{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "rc3",
+			Namespace: "default",
+			SelfLink:  "api/v1/namespaces/default/replicationcontrollers/rc3",
+			UID:       "12345678-1234-1234-1234-12345678901a",
+		},
+	}
+
 	p1 := BuildTestPod("p1", 1500, 200000)
 	p2_1 := BuildTestPod("p2_1", 3000, 200000)
 	p2_1.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
@@ -56,10 +65,16 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
 	p3_1.OwnerReferences = GenerateOwnerReferences(rc2.Name, "ReplicationController", "extensions/v1beta1", rc2.UID)
 	p3_2 := BuildTestPod("p3_2", 100, 200000)
 	p3_2.OwnerReferences = GenerateOwnerReferences(rc2.Name, "ReplicationController", "extensions/v1beta1", rc2.UID)
-	unschedulablePods := []*apiv1.Pod{p1, p2_1, p2_2, p3_1, p3_2}
+	p4_1 := BuildTestPod("p4_1", 100, 200000)
+	p4_1.OwnerReferences = GenerateOwnerReferences(rc3.Name, "ReplicationController", "extensions/v1beta1", rc3.UID)
+	p4_1.Spec.Volumes = []apiv1.Volume{{Name: "kube-api-access-nz94b"}}
+	p4_2 := BuildTestPod("p4_2", 100, 200000)
+	p4_2.OwnerReferences = GenerateOwnerReferences(rc3.Name, "ReplicationController", "extensions/v1beta1", rc3.UID)
+	p4_2.Spec.Volumes = []apiv1.Volume{{Name: "kube-api-access-mo25i"}}
+	unschedulablePods := []*apiv1.Pod{p1, p2_1, p2_2, p3_1, p3_2, p4_1, p4_2}
 
 	podGroups := groupPodsBySchedulingProperties(unschedulablePods)
-	assert.Equal(t, 3, len(podGroups))
+	assert.Equal(t, 4, len(podGroups))
 
 	wantedGroups := []struct {
 		pods []*apiv1.Pod
@@ -68,6 +83,7 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
 		{pods: []*apiv1.Pod{p1}},
 		{pods: []*apiv1.Pod{p2_1, p2_2}},
 		{pods: []*apiv1.Pod{p3_1, p3_2}},
+		{pods: []*apiv1.Pod{p4_1, p4_2}},
 	}
 
 	equal := func(a, b []*apiv1.Pod) bool {
@@ -102,3 +118,31 @@ func TestGroupSchedulablePodsForNode(t *testing.T) {
 		assert.True(t, w.found, fmt.Errorf("Expected pod group: %+v", w))
 	}
 }
+
+func TestPodSpecSemanticallyEqual(t *testing.T) {
+	rc1 := apiv1.ReplicationController{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "rc1",
+			Namespace: "default",
+			UID:       "12345678-1234-1234-1234-123456789012",
+		},
+	}
+	p1_1 := BuildTestPod("p1_1", 1500, 200000)
+	p1_1.Spec.Volumes = []apiv1.Volume{{Name: "vol-8bx7p"}}
+	p1_1.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
+	p1_2 := BuildTestPod("p1_2", 1500, 200000)
+	p1_2.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{{Name: "vol-3en7l"}}
+	p1_2.OwnerReferences = GenerateOwnerReferences(rc1.Name, "ReplicationController", "extensions/v1beta1", rc1.UID)
+	assert.True(t, podSpecSemanticallyEqual(p1_1.Spec, p1_2.Spec))
+}
+
+func TestSanitizeVolumesAndMounts(t *testing.T) {
+	p1 := BuildTestPod("p1", 1500, 200000)
+	p1.Spec.Volumes = []apiv1.Volume{{Name: "vol1"}, {Name: "vol2"}}
+	p2 := BuildTestPod("p2", 1500, 200000)
+	p2.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{{Name: "volmount1"}}
+	p1SanitizedSpec := sanitizeVolumesAndMounts(p1.Spec)
+	p2SanitizedSpec := sanitizeVolumesAndMounts(p2.Spec)
+	assert.Nil(t, p1SanitizedSpec.Volumes)
+	assert.Nil(t, p2SanitizedSpec.Containers[0].VolumeMounts)
+}
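
The added tests already exercise the new helpers, but as a quick standalone illustration of the idea (not part of the patch), the sketch below inlines an equivalent sanitize helper and shows that two pod specs differing only in their generated kube-api-access volume names compare unequal under apiequality.Semantic.DeepEqual, and equal once volumes and mounts are cleared. The helper name, container image, and volume names are made up for the example.

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

// sanitize mirrors the sanitizeVolumesAndMounts helper from the diff: it clears
// volumes and per-container volume mounts so that generated name suffixes do not
// affect the comparison. Illustrative copy only, not the autoscaler's code path.
func sanitize(spec apiv1.PodSpec) apiv1.PodSpec {
	spec.Volumes = nil
	for i := range spec.Containers {
		spec.Containers[i].VolumeMounts = nil
	}
	return spec
}

func main() {
	// Two specs that differ only in the randomly suffixed kube-api-access volume.
	a := apiv1.PodSpec{
		Containers: []apiv1.Container{{Name: "app", Image: "nginx"}},
		Volumes:    []apiv1.Volume{{Name: "kube-api-access-abc12"}},
	}
	b := apiv1.PodSpec{
		Containers: []apiv1.Container{{Name: "app", Image: "nginx"}},
		Volumes:    []apiv1.Volume{{Name: "kube-api-access-xyz89"}},
	}

	fmt.Println(apiequality.Semantic.DeepEqual(a, b))                     // false: volume names differ
	fmt.Println(apiequality.Semantic.DeepEqual(sanitize(a), sanitize(b))) // true: volumes ignored
}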