[rework-this][test/int/pod] Simple pod group unsuspend delete test.
trasc committed Nov 14, 2023
Parent: cea2a81 · Commit: d59e250
1 changed file: 75 additions, 0 deletions.
test/integration/controller/jobs/pod/pod_controller_test.go
@@ -513,6 +513,81 @@ var _ = ginkgo.Describe("Pod controller interacting with scheduler", ginkgo.Orde
util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)
})

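// Verifies the pod-group flow end to end: two gated pods are composed into a
// single Workload, admitted and unsuspended, and then deleted once their
// admission is cleared and the LocalQueue is gone.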
ginkgo.It("Should schedule pod groups as they fit in their ClusterQueue", func() {
ginkgo.By("creating localQueue")
localQueue = testing.MakeLocalQueue("local-queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())

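// Both pods carry the same group name "dev-pods" and declare a total count
// of 2, so Kueue composes them into a single Workload for the group.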
pod1 := testingpod.MakePod("dev-pod1", ns.Name).
Group("dev-pods").
GroupTotalCount("2").
Queue(localQueue.Name).
Request(corev1.ResourceCPU, "2").
Obj()
pod2 := testingpod.MakePod("dev-pod2", ns.Name).
Group("dev-pods").
GroupTotalCount("2").
Queue(localQueue.Name).
Request(corev1.ResourceCPU, "2").
Obj()

ginkgo.By("creating the pods", func() {
gomega.Expect(k8sClient.Create(ctx, pod1)).Should(gomega.Succeed())
gomega.Expect(k8sClient.Create(ctx, pod2)).Should(gomega.Succeed())
})

// The composed workload is named after the pod group and lives in the pods' namespace.
wlKey := types.NamespacedName{
Namespace: pod1.Namespace,
Name: "dev-pods",
}
wl := &kueue.Workload{}

ginkgo.By("checking the composed workload is created", func() {
gomega.Eventually(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, wlKey, wl)).Should(gomega.Succeed())
}, util.Timeout, util.Interval).Should(gomega.Succeed())
})

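// A pod counts as unsuspended once its Kueue scheduling gate is removed;
// the node selector then reflects the flavor assigned at admission.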
createdPod := &corev1.Pod{}
ginkgo.By("check the pods are unsuspended", func() {
gomega.Eventually(func(g gomega.Gomega) []corev1.PodSchedulingGate {
g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: pod1.Name, Namespace: pod1.Namespace}, createdPod)).
To(gomega.Succeed())
return createdPod.Spec.SchedulingGates
}, util.Timeout, util.Interval).Should(gomega.BeEmpty())
gomega.Expect(createdPod.Spec.NodeSelector[instanceKey]).Should(gomega.Equal(spotUntaintedFlavor.Name))
gomega.Eventually(func(g gomega.Gomega) []corev1.PodSchedulingGate {
g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: pod2.Name, Namespace: pod2.Namespace}, createdPod)).
To(gomega.Succeed())
return createdPod.Spec.SchedulingGates
}, util.Timeout, util.Interval).Should(gomega.BeEmpty())
gomega.Expect(createdPod.Spec.NodeSelector[instanceKey]).Should(gomega.Equal(spotUntaintedFlavor.Name))
})
util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 0)
util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)

ginkgo.By("deleting the localQueue to prevent readmission", func() {
gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed())
})

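// Passing nil clears the workload's quota reservation; syncing the Admitted
// condition then marks the workload as no longer admitted.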
ginkgo.By("clearing the workload's admission to stop the job", func() {
gomega.Expect(k8sClient.Get(ctx, wlKey, wl)).Should(gomega.Succeed())
gomega.Expect(util.SetQuotaReservation(ctx, k8sClient, wl, nil)).Should(gomega.Succeed())
util.SyncAdmittedConditionForWorkloads(ctx, k8sClient, wl)
})

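// With admission removed and the LocalQueue deleted, the group cannot be
// readmitted, so the controller is expected to delete the pods; a non-zero
// DeletionTimestamp shows deletion was requested (finalizers may still hold
// the objects).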
ginkgo.By("checking pods are deleted", func() {
gomega.Eventually(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(pod1), createdPod)).Should(gomega.Succeed())
g.Expect(createdPod.DeletionTimestamp.IsZero()).To(gomega.BeFalse())
g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(pod2), createdPod)).Should(gomega.Succeed())
g.Expect(createdPod.DeletionTimestamp.IsZero()).To(gomega.BeFalse())
}, util.Timeout, util.Interval).Should(gomega.Succeed())
})
})

ginkgo.When("The workload's admission is removed", func() {
ginkgo.It("Should not restore the original node selectors", func() {
localQueue := testing.MakeLocalQueue("local-queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
