Propagate gomega.Gomega on integration/scheduler tests. (kubernetes-sigs#3329)

* Propagate gomega.Gomega on fair_sharing_test.go.

* Propagate gomega.Gomega on podsready/scheduler_test.go.

* Propagate gomega.Gomega on preemption_test.go.

* Propagate gomega.Gomega on workload_controller_test.go.

* Propagate gomega.Gomega on scheduler_test.go.
mbobrovskyi authored and kannon92 committed Nov 19, 2024
1 parent 50e0839 commit 64ec7e4
Showing 5 changed files with 330 additions and 357 deletions.
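
All five files receive the same mechanical change: polling callbacks that used to return a value (with the matcher attached to the `Eventually`/`EventuallyWithOffset` call) now take a `gomega.Gomega` argument, assert inline with `g.Expect`, and let the outer assertion collapse to `Should(gomega.Succeed())`. A minimal, self-contained sketch of the resulting shape, relying on recent Gomega's support for `func(g gomega.Gomega)` callbacks; the counter, timings, and test name below are illustrative, not taken from the Kueue code:

```go
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestPolledCallbackUsesItsOwnGomega(t *testing.T) {
	g := gomega.NewWithT(t)

	// Something asynchronous the test has to wait for.
	var counter atomic.Int64
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(10 * time.Millisecond)
			counter.Add(1)
		}
	}()

	// Old shape (what the commit removes, schematically):
	//   gomega.Eventually(func() int64 { return counter.Load() }, ...).Should(gomega.BeNumerically(">=", 3))
	// New shape: the callback receives its own gomega.Gomega, asserts inline,
	// and the outer assertion only checks that one attempt succeeded.
	g.Eventually(func(g gomega.Gomega) {
		g.Expect(counter.Load()).Should(gomega.BeNumerically(">=", 3), "not enough increments")
	}, time.Second, 10*time.Millisecond).Should(gomega.Succeed())
}
```
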
6 changes: 3 additions & 3 deletions test/integration/scheduler/fairsharing/fair_sharing_test.go
@@ -312,7 +312,7 @@ func finishRunningWorkloadsInCQ(cq *kueue.ClusterQueue, n int) {
 
 func finishEvictionOfWorkloadsInCQ(cq *kueue.ClusterQueue, n int) {
 	finished := sets.New[types.UID]()
-	gomega.EventuallyWithOffset(1, func(g gomega.Gomega) int {
+	gomega.EventuallyWithOffset(1, func(g gomega.Gomega) {
 		var wList kueue.WorkloadList
 		g.Expect(k8sClient.List(ctx, &wList)).To(gomega.Succeed())
 		for i := 0; i < len(wList.Items) && finished.Len() < n; i++ {
@@ -328,6 +328,6 @@ func finishEvictionOfWorkloadsInCQ(cq *kueue.ClusterQueue, n int) {
 				finished.Insert(wl.UID)
 			}
 		}
-		return finished.Len()
-	}, util.Timeout, util.Interval).Should(gomega.Equal(n), "Not enough workloads evicted")
+		g.Expect(finished.Len()).Should(gomega.Equal(n), "Not enough workloads evicted")
+	}, util.Timeout, util.Interval).Should(gomega.Succeed())
 }
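
Note that the custom failure message survives the rewrite: it moves from the matcher attached to `EventuallyWithOffset` into the `g.Expect` call inside the callback, so a timeout still reports "Not enough workloads evicted" rather than a bare `Succeed` failure.
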
102 changes: 51 additions & 51 deletions test/integration/scheduler/podsready/scheduler_test.go
@@ -142,14 +142,14 @@ var _ = ginkgo.Describe("SchedulerWithWaitForPodsReady", func() {
 			util.ExpectWorkloadsToBeWaiting(ctx, k8sClient, devWl)
 
 			ginkgo.By("update the first workload to be in the PodsReady condition and verify the second workload is admitted")
-			gomega.Eventually(func() error {
-				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodWl), prodWl)).Should(gomega.Succeed())
+			gomega.Eventually(func(g gomega.Gomega) {
+				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodWl), prodWl)).Should(gomega.Succeed())
 				apimeta.SetStatusCondition(&prodWl.Status.Conditions, metav1.Condition{
 					Type: kueue.WorkloadPodsReady,
 					Status: metav1.ConditionTrue,
 					Reason: "PodsReady",
 				})
-				return k8sClient.Status().Update(ctx, prodWl)
+				g.Expect(k8sClient.Status().Update(ctx, prodWl)).Should(gomega.Succeed())
 			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 			util.ExpectWorkloadsToHaveQuotaReservation(ctx, k8sClient, devClusterQ.Name, devWl)
 		})
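
Switching from the package-level `gomega.Expect` to the callback's `g` inside `Eventually` is more than cosmetic: per Gomega's documented guidance for polled functions, a failed assertion against `g` only fails that attempt and is retried until the timeout, whereas the global `Expect` would fail the spec immediately on a transient `Get` or `Update` error.
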
@@ -220,10 +220,10 @@ var _ = ginkgo.Describe("SchedulerWithWaitForPodsReady", func() {
 			ginkgo.By("awaiting for the Admitted=True condition to be added to 'prod1")
 			// We assume that the test will get to this check before the timeout expires and the
 			// kueue cancels the admission. Mentioning this in case this test flakes in the future.
-			gomega.Eventually(func() bool {
-				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodWl1), prodWl1)).Should(gomega.Succeed())
-				return workload.HasQuotaReservation(prodWl1)
-			}, util.Timeout, util.Interval).Should(gomega.BeTrue())
+			gomega.Eventually(func(g gomega.Gomega) {
+				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodWl1), prodWl1)).Should(gomega.Succeed())
+				g.Expect(workload.HasQuotaReservation(prodWl1)).Should(gomega.BeTrue())
+			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 
 			ginkgo.By("determining the time of admission as LastTransitionTime for the Admitted condition")
 			admittedAt := apimeta.FindStatusCondition(prodWl1.Status.Conditions, kueue.WorkloadQuotaReserved).LastTransitionTime.Time
@@ -335,29 +335,29 @@ var _ = ginkgo.Describe("SchedulerWithWaitForPodsReady", func() {
 			util.ExpectWorkloadsToBeWaiting(ctx, k8sClient, devWl)
 
 			ginkgo.By("verify the 'prod' queue resources are used")
-			gomega.Eventually(func() kueue.ClusterQueueStatus {
-				var updatedCQ kueue.ClusterQueue
-				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodClusterQ), &updatedCQ)).To(gomega.Succeed())
-				return updatedCQ.Status
-			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
-				PendingWorkloads: 0,
-				ReservingWorkloads: 1,
-				AdmittedWorkloads: 1,
-				FlavorsReservation: []kueue.FlavorUsage{{
-					Name: "default",
-					Resources: []kueue.ResourceUsage{{
-						Name: corev1.ResourceCPU,
-						Total: resource.MustParse("2"),
-					}},
-				}},
-				FlavorsUsage: []kueue.FlavorUsage{{
-					Name: "default",
-					Resources: []kueue.ResourceUsage{{
-						Name: corev1.ResourceCPU,
-						Total: resource.MustParse("2"),
-					}},
-				}},
-			}, ignoreCQConditions, ignorePendingWorkloadsStatus))
+			gomega.Eventually(func(g gomega.Gomega) {
+				var updatedCQ kueue.ClusterQueue
+				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodClusterQ), &updatedCQ)).To(gomega.Succeed())
+				g.Expect(updatedCQ.Status).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
+					PendingWorkloads: 0,
+					ReservingWorkloads: 1,
+					AdmittedWorkloads: 1,
+					FlavorsReservation: []kueue.FlavorUsage{{
+						Name: "default",
+						Resources: []kueue.ResourceUsage{{
+							Name: corev1.ResourceCPU,
+							Total: resource.MustParse("2"),
+						}},
+					}},
+					FlavorsUsage: []kueue.FlavorUsage{{
+						Name: "default",
+						Resources: []kueue.ResourceUsage{{
+							Name: corev1.ResourceCPU,
+							Total: resource.MustParse("2"),
+						}},
+					}},
+				}, ignoreCQConditions, ignorePendingWorkloadsStatus))
+			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 
 			ginkgo.By("wait for the timeout to be exceeded")
 			time.Sleep(podsReadyTimeout)
@@ -366,35 +366,35 @@
 			util.FinishEvictionForWorkloads(ctx, k8sClient, prodWl)
 
 			ginkgo.By("wait for the first workload to be unadmitted")
-			gomega.Eventually(func() *kueue.Admission {
-				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodWl), prodWl)).Should(gomega.Succeed())
-				return prodWl.Status.Admission
-			}, util.Timeout, util.Interval).Should(gomega.BeNil())
+			gomega.Eventually(func(g gomega.Gomega) {
+				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodWl), prodWl)).Should(gomega.Succeed())
+				g.Expect(prodWl.Status.Admission).Should(gomega.BeNil())
+			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 
 			ginkgo.By("verify the queue resources are freed")
-			gomega.Eventually(func() kueue.ClusterQueueStatus {
-				var updatedCQ kueue.ClusterQueue
-				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodClusterQ), &updatedCQ)).To(gomega.Succeed())
-				return updatedCQ.Status
-			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
-				PendingWorkloads: 1,
-				ReservingWorkloads: 0,
-				AdmittedWorkloads: 0,
-				FlavorsReservation: []kueue.FlavorUsage{{
-					Name: "default",
-					Resources: []kueue.ResourceUsage{{
-						Name: corev1.ResourceCPU,
-						Total: resource.MustParse("0"),
-					}},
-				}},
-				FlavorsUsage: []kueue.FlavorUsage{{
-					Name: "default",
-					Resources: []kueue.ResourceUsage{{
-						Name: corev1.ResourceCPU,
-						Total: resource.MustParse("0"),
-					}},
-				}},
-			}, ignoreCQConditions, ignorePendingWorkloadsStatus))
+			gomega.Eventually(func(g gomega.Gomega) {
+				var updatedCQ kueue.ClusterQueue
+				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(prodClusterQ), &updatedCQ)).To(gomega.Succeed())
+				g.Expect(updatedCQ.Status).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
+					PendingWorkloads: 1,
+					ReservingWorkloads: 0,
+					AdmittedWorkloads: 0,
+					FlavorsReservation: []kueue.FlavorUsage{{
+						Name: "default",
+						Resources: []kueue.ResourceUsage{{
+							Name: corev1.ResourceCPU,
+							Total: resource.MustParse("0"),
+						}},
+					}},
+					FlavorsUsage: []kueue.FlavorUsage{{
+						Name: "default",
+						Resources: []kueue.ResourceUsage{{
+							Name: corev1.ResourceCPU,
+							Total: resource.MustParse("0"),
+						}},
+					}},
+				}, ignoreCQConditions, ignorePendingWorkloadsStatus))
+			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 
 			ginkgo.By("verify the active workload metric is decreased for the cluster queue")
 			util.ExpectReservingActiveWorkloadsMetric(prodClusterQ, 0)
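
Both hunks above move an entire `BeComparableTo` comparison, together with the `ignoreCQConditions` and `ignorePendingWorkloadsStatus` options, inside the polled callback, so the whole comparison reruns on every attempt. A self-contained sketch of that shape, with a stand-in `status` struct and an illustrative `cmpopts` option in place of the real `kueue.ClusterQueueStatus` and helpers:

```go
package example

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/onsi/gomega"
)

// status is a stand-in for kueue.ClusterQueueStatus; the fields are illustrative.
type status struct {
	Pending    int
	Admitted   int
	Conditions []string
}

func TestCompareStatusInsideEventually(t *testing.T) {
	g := gomega.NewWithT(t)

	current := func() status {
		return status{Pending: 0, Admitted: 1, Conditions: []string{"Ready"}}
	}

	// The comparison, including the ignore option, runs on every poll attempt;
	// the outer assertion only checks that one attempt succeeded.
	ignoreConditions := cmpopts.IgnoreFields(status{}, "Conditions")
	g.Eventually(func(g gomega.Gomega) {
		g.Expect(current()).Should(gomega.BeComparableTo(status{Pending: 0, Admitted: 1}, ignoreConditions))
	}, time.Second, 50*time.Millisecond).Should(gomega.Succeed())
}
```
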
12 changes: 6 additions & 6 deletions test/integration/scheduler/preemption_test.go
@@ -424,19 +424,19 @@ var _ = ginkgo.Describe("Preemption", func() {
 			gomega.Expect(k8sClient.Create(ctx, gammaWl)).To(gomega.Succeed())
 
 			var evictedWorkloads []*kueue.Workload
-			gomega.Eventually(func() int {
+			gomega.Eventually(func(g gomega.Gomega) {
 				evictedWorkloads = util.FilterEvictedWorkloads(ctx, k8sClient, betaWls...)
-				return len(evictedWorkloads)
-			}, util.Timeout, util.Interval).Should(gomega.Equal(1), "Number of evicted workloads")
+				g.Expect(evictedWorkloads).Should(gomega.HaveLen(1), "Number of evicted workloads")
+			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 
 			ginkgo.By("Finishing eviction for first set of preempted workloads")
 			util.FinishEvictionForWorkloads(ctx, k8sClient, evictedWorkloads...)
 			util.ExpectWorkloadsToBeAdmittedCount(ctx, k8sClient, 1, alphaWl, gammaWl)
 
-			gomega.Eventually(func() int {
+			gomega.Eventually(func(g gomega.Gomega) {
 				evictedWorkloads = util.FilterEvictedWorkloads(ctx, k8sClient, betaWls...)
-				return len(evictedWorkloads)
-			}, util.Timeout, util.Interval).Should(gomega.Equal(2), "Number of evicted workloads")
+				g.Expect(evictedWorkloads).Should(gomega.HaveLen(2), "Number of evicted workloads")
+			}, util.Timeout, util.Interval).Should(gomega.Succeed())
 
 			ginkgo.By("Finishing eviction for second set of preempted workloads")
 			util.FinishEvictionForWorkloads(ctx, k8sClient, evictedWorkloads...)
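
One detail worth noting in this hunk: `evictedWorkloads` is declared outside the callback and reassigned on each attempt, so after `Eventually` succeeds the test keeps using the slice from the passing attempt. A self-contained sketch of that capture pattern; the toy shared state, names, and timings are illustrative:

```go
package example

import (
	"sync"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestCaptureResultFromPolledCallback(t *testing.T) {
	g := gomega.NewWithT(t)

	// A toy shared state that another goroutine is still populating.
	var (
		mu    sync.Mutex
		state []string
	)
	go func() {
		for _, w := range []string{"beta1", "beta2"} {
			time.Sleep(20 * time.Millisecond)
			mu.Lock()
			state = append(state, w)
			mu.Unlock()
		}
	}()
	snapshot := func() []string {
		mu.Lock()
		defer mu.Unlock()
		return append([]string(nil), state...)
	}

	// evicted is reassigned on every attempt; once the assertion passes it
	// holds the snapshot from the successful attempt for use after Eventually.
	var evicted []string
	g.Eventually(func(g gomega.Gomega) {
		evicted = snapshot()
		g.Expect(evicted).Should(gomega.HaveLen(2), "Number of evicted items")
	}, time.Second, 20*time.Millisecond).Should(gomega.Succeed())

	t.Logf("captured after polling: %v", evicted)
}
```
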