Propagate gomega.Gomega to gomega.Consistently(). (kubernetes-sigs#3345)
mbobrovskyi authored and PBundyra committed Nov 5, 2024
1 parent c4602f6 commit f892e51
Showing 7 changed files with 72 additions and 89 deletions.
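
All seven files apply the same migration: the callback passed to gomega.Consistently no longer returns a value for an outer matcher (BeTrue, Equal, ContainElement, BeNotFoundError) to inspect; it now accepts a gomega.Gomega, makes its assertions inline with g.Expect, and the outer matcher becomes Should(gomega.Succeed()). A minimal, self-contained sketch of the before/after pattern, assuming an illustrative fetchSuspended helper and arbitrary polling durations that are not part of this commit:

package example

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// fetchSuspended is an illustrative stand-in for a k8sClient.Get call followed
// by reading a field; it is not part of this commit.
func fetchSuspended() (bool, error) { return true, nil }

func TestConsistentlyPattern(t *testing.T) {
	g := gomega.NewWithT(t)

	// Old style: the polled function returns a value, so a Get error can only
	// surface as a generic "expected true" failure.
	g.Consistently(func() bool {
		suspended, err := fetchSuspended()
		if err != nil {
			return false
		}
		return suspended
	}, 200*time.Millisecond, 20*time.Millisecond).Should(gomega.BeTrue())

	// New style, as propagated by this commit: the polled function receives a
	// gomega.Gomega and asserts inline, so each failure reports which
	// expectation broke and with what value.
	g.Consistently(func(gg gomega.Gomega) {
		suspended, err := fetchSuspended()
		gg.Expect(err).ShouldNot(gomega.HaveOccurred())
		gg.Expect(suspended).Should(gomega.BeTrue())
	}, 200*time.Millisecond, 20*time.Millisecond).Should(gomega.Succeed())
}

The practical gain is in failure output: a failed Get inside the callback now reports the underlying error instead of collapsing into a zero value such as false or an empty string.
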
10 changes: 4 additions & 6 deletions test/e2e/singlecluster/e2e_test.go
@@ -389,12 +389,10 @@ var _ = ginkgo.Describe("Kueue", func() {
ginkgo.By("checking the job remains suspended", func() {
createdJob := &batchv1.Job{}
jobKey := client.ObjectKeyFromObject(sampleJob)
-gomega.Consistently(func() bool {
-	if err := k8sClient.Get(ctx, jobKey, createdJob); err != nil {
-		return false
-	}
-	return ptr.Deref(createdJob.Spec.Suspend, false)
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeTrue())
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, jobKey, createdJob)).Should(gomega.Succeed())
+	g.Expect(createdJob.Spec.Suspend).Should(gomega.Equal(ptr.To(true)))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("setting the check as successful", func() {
4 changes: 1 addition & 3 deletions test/e2e/singlecluster/pod_test.go
@@ -150,9 +150,7 @@ var _ = ginkgo.Describe("Pod groups", func() {
for _, origPod := range group[:2] {
var p corev1.Pod
g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
-g.Expect(p.Spec.SchedulingGates).
-	To(gomega.ContainElement(corev1.PodSchedulingGate{
-		Name: pod.SchedulingGateName}))
+g.Expect(p.Spec.SchedulingGates).To(gomega.ContainElement(corev1.PodSchedulingGate{Name: pod.SchedulingGateName}))
}
}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})
@@ -215,9 +215,9 @@ var _ = ginkgo.Describe("Provisioning", ginkgo.Ordered, ginkgo.ContinueOnFailure
})

ginkgo.By("Checking no provision request is created", func() {
-gomega.Consistently(func() error {
-	return k8sClient.Get(ctx, provReqKey, &createdRequest)
-}, util.ConsistentDuration, util.Interval).Should(testing.BeNotFoundError())
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, provReqKey, &createdRequest)).Should(testing.BeNotFoundError())
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})
})

@@ -293,8 +293,8 @@ var _ = ginkgo.Describe("Provisioning", ginkgo.Ordered, ginkgo.ContinueOnFailure
})

ginkgo.By("Checking that the provision request is preserved", func() {
-gomega.Consistently(func() error {
-	return k8sClient.Get(ctx, provReqKey, &createdRequest)
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, provReqKey, &createdRequest)).Should(gomega.Succeed())
}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})
})
75 changes: 34 additions & 41 deletions test/integration/controller/jobs/job/job_controller_test.go
@@ -25,7 +25,6 @@ import (
"github.com/onsi/gomega"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
-apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -143,12 +142,10 @@ var _ = ginkgo.Describe("Job controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
newJobLabelValue := "updatedValue"
createdJob.Labels["toCopyKey"] = newJobLabelValue
gomega.Expect(k8sClient.Update(ctx, createdJob)).Should(gomega.Succeed())
-gomega.Consistently(func() string {
-	if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
-		return ""
-	}
-	return createdWorkload.Labels["toCopyKey"]
-}, util.ConsistentDuration, util.Interval).Should(gomega.Equal("toCopyValue"))
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, wlLookupKey, createdWorkload)).Should(gomega.Succeed())
+	g.Expect(createdWorkload.Labels).Should(gomega.HaveKeyWithValue("toCopyKey", "toCopyValue"))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())

ginkgo.By("updated workload should have the same created timestamp", func() {
gomega.Expect(createdWorkload.CreationTimestamp).Should(gomega.Equal(createdTime))
@@ -167,8 +164,8 @@ var _ = ginkgo.Describe("Job controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
gomega.Expect(k8sClient.Create(ctx, secondWl)).Should(gomega.Succeed())
util.ExpectObjectToBeDeleted(ctx, k8sClient, secondWl, false)
// check the original wl is still there
-gomega.Consistently(func() error {
-	return k8sClient.Get(ctx, wlLookupKey, createdWorkload)
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, wlLookupKey, createdWorkload)).Should(gomega.Succeed())
}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
gomega.Eventually(func(g gomega.Gomega) {
ok, _ := testing.CheckEventRecordedFor(ctx, k8sClient, "DeletedWorkload", corev1.EventTypeNormal, fmt.Sprintf("Deleted not matching Workload: %v", workload.Key(secondWl)), lookupKey)
@@ -250,12 +247,10 @@ var _ = ginkgo.Describe("Job controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
}, util.Timeout, util.Interval).Should(gomega.Succeed())
gomega.Expect(createdJob.Spec.Template.Spec.NodeSelector).Should(gomega.HaveLen(1))
gomega.Expect(createdJob.Spec.Template.Spec.NodeSelector[instanceKey]).Should(gomega.Equal(spotFlavor.Name))
-gomega.Consistently(func() bool {
-	if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
-		return false
-	}
-	return len(createdWorkload.Status.Conditions) == 2
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeTrue())
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, wlLookupKey, createdWorkload)).Should(gomega.Succeed())
+	g.Expect(createdWorkload.Status.Conditions).Should(gomega.HaveLen(2))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())

ginkgo.By("checking the workload is finished when job is completed")
createdJob.Status.Conditions = append(createdJob.Status.Conditions,
@@ -323,9 +318,9 @@ var _ = ginkgo.Describe("Job controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
ginkgo.By("Checking that the child workload is not created")
childWorkload := &kueue.Workload{}
childWlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(childJob.Name, childJob.UID), Namespace: ns.Name}
-gomega.Consistently(func() bool {
-	return apierrors.IsNotFound(k8sClient.Get(ctx, childWlLookupKey, childWorkload))
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeTrue())
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, childWlLookupKey, childWorkload)).Should(testing.BeNotFoundError())
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.It("Should not update the queue name of the workload with an empty value that the child job has", func() {
@@ -349,12 +344,10 @@ var _ = ginkgo.Describe("Job controller", ginkgo.Ordered, ginkgo.ContinueOnFailu

ginkgo.By("Checking that the queue name of the parent workload isn't updated with an empty value")
parentWorkload = &kueue.Workload{}
-gomega.Consistently(func() bool {
-	if err := k8sClient.Get(ctx, parentWlLookupKey, parentWorkload); err != nil {
-		return true
-	}
-	return parentWorkload.Spec.QueueName == jobQueueName
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeTrue())
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, parentWlLookupKey, parentWorkload)).Should(gomega.Succeed())
+	g.Expect(parentWorkload.Spec.QueueName).Should(gomega.Equal(jobQueueName))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.It("Should change the suspension status of the child job when the parent's workload is not admitted", func() {
@@ -856,10 +849,10 @@ var _ = ginkgo.Describe("Job controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
})

ginkgo.By("verify the job is not started", func() {
-gomega.Consistently(func() *bool {
-	gomega.Expect(k8sClient.Get(ctx, *jobLookupKey, createdJob)).Should(gomega.Succeed())
-	return createdJob.Spec.Suspend
-}, util.ConsistentDuration, util.Interval).Should(gomega.Equal(ptr.To(true)))
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, *jobLookupKey, createdJob)).Should(gomega.Succeed())
+	g.Expect(createdJob.Spec.Suspend).Should(gomega.Equal(ptr.To(true)))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("verify the job has the old label value", func() {
@@ -1235,10 +1228,10 @@ var _ = ginkgo.Describe("Interacting with scheduler", ginkgo.Ordered, ginkgo.Con
gomega.Expect(k8sClient.Create(ctx, prodJob2)).Should(gomega.Succeed())
lookupKey2 := types.NamespacedName{Name: prodJob2.Name, Namespace: prodJob2.Namespace}
createdProdJob2 := &batchv1.Job{}
-gomega.Consistently(func() *bool {
-	gomega.Expect(k8sClient.Get(ctx, lookupKey2, createdProdJob2)).Should(gomega.Succeed())
-	return createdProdJob2.Spec.Suspend
-}, util.ConsistentDuration, util.Interval).Should(gomega.Equal(ptr.To(true)))
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, lookupKey2, createdProdJob2)).Should(gomega.Succeed())
+	g.Expect(createdProdJob2.Spec.Suspend).Should(gomega.Equal(ptr.To(true)))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
util.ExpectPendingWorkloadsMetric(prodClusterQ, 0, 1)
util.ExpectReservingActiveWorkloadsMetric(prodClusterQ, 1)

@@ -1517,10 +1510,10 @@ var _ = ginkgo.Describe("Interacting with scheduler", ginkgo.Ordered, ginkgo.Con
ginkgo.By("checking a second no-fit job does not start", func() {
gomega.Expect(k8sClient.Create(ctx, job2)).Should(gomega.Succeed())
createdJob2 := &batchv1.Job{}
-gomega.Consistently(func() *bool {
-	gomega.Expect(k8sClient.Get(ctx, lookupKey2, createdJob2)).Should(gomega.Succeed())
-	return createdJob2.Spec.Suspend
-}, util.ConsistentDuration, util.Interval).Should(gomega.Equal(ptr.To(true)))
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, lookupKey2, createdJob2)).Should(gomega.Succeed())
+	g.Expect(createdJob2.Spec.Suspend).Should(gomega.Equal(ptr.To(true)))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
util.ExpectPendingWorkloadsMetric(prodClusterQ, 0, 1)
util.ExpectReservingActiveWorkloadsMetric(prodClusterQ, 1)
})
@@ -1666,10 +1659,10 @@ var _ = ginkgo.Describe("Interacting with scheduler", ginkgo.Ordered, ginkgo.Con

createdJob := &batchv1.Job{}
ginkgo.By("the job should stay suspended", func() {
-gomega.Consistently(func() *bool {
-	gomega.Expect(k8sClient.Get(ctx, jobKey, createdJob)).Should(gomega.Succeed())
-	return createdJob.Spec.Suspend
-}, util.ConsistentDuration, util.Interval).Should(gomega.Equal(ptr.To(true)))
+gomega.Consistently(func(g gomega.Gomega) {
+	g.Expect(k8sClient.Get(ctx, jobKey, createdJob)).Should(gomega.Succeed())
+	g.Expect(createdJob.Spec.Suspend).Should(gomega.Equal(ptr.To(true)))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("enable partial admission", func() {
@@ -1892,7 +1885,7 @@ var _ = ginkgo.Describe("Interacting with scheduler", ginkgo.Ordered, ginkgo.Con
g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wll), wll)).Should(gomega.Succeed())
// Should have Evicted condition
isEvicting := apimeta.IsStatusConditionTrue(wll.Status.Conditions, kueue.WorkloadEvicted)
-gomega.Expect(isEvicting).Should(gomega.BeTrue())
+g.Expect(isEvicting).Should(gomega.BeTrue())
}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())

ginkgo.By("checking the first job becomes unsuspended after we update the Active field back to true")
20 changes: 9 additions & 11 deletions test/integration/controller/jobs/pod/pod_controller_test.go
@@ -869,10 +869,10 @@ var _ = ginkgo.Describe("Pod controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
ginkgo.By("Failing the running pod")
util.SetPodsPhase(ctx, k8sClient, corev1.PodFailed, pod)
createdPod := &corev1.Pod{}
-gomega.Consistently(func(g gomega.Gomega) []string {
+gomega.Consistently(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, podLookupKey, createdPod)).To(gomega.Succeed())
-	return createdPod.Finalizers
-}, util.ConsistentDuration, util.Interval).Should(gomega.ContainElement(constants.ManagedByKueueLabel), "Pod should have finalizer")
+	g.Expect(createdPod.Finalizers).Should(gomega.ContainElement(constants.ManagedByKueueLabel), "Pod should have finalizer")
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
gomega.Expect(createdPod.Status.Phase).To(gomega.Equal(corev1.PodFailed))

ginkgo.By("Checking that WaitingForReplacementPods status is set to true", func() {
@@ -1039,17 +1039,15 @@ var _ = ginkgo.Describe("Pod controller", ginkgo.Ordered, ginkgo.ContinueOnFailu
ginkgo.By("checking that the pod group is not finalized if the group has failed", func() {
util.SetPodsPhase(ctx, k8sClient, corev1.PodFailed, pod1, pod2)

-gomega.Consistently(func(g gomega.Gomega) []string {
+gomega.Consistently(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, pod1LookupKey, createdPod)).To(gomega.Succeed())
-	return createdPod.Finalizers
-}, util.ConsistentDuration, util.Interval).Should(gomega.ContainElement(constants.ManagedByKueueLabel),
-	"Pod should have finalizer")
+	g.Expect(createdPod.Finalizers).Should(gomega.ContainElement(constants.ManagedByKueueLabel), "Pod should have finalizer")
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())

-gomega.Consistently(func(g gomega.Gomega) []string {
+gomega.Consistently(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, pod2LookupKey, createdPod)).To(gomega.Succeed())
-	return createdPod.Finalizers
-}, util.ConsistentDuration, util.Interval).Should(gomega.ContainElement(constants.ManagedByKueueLabel),
-	"Pod should have finalizer")
+	g.Expect(createdPod.Finalizers).Should(gomega.ContainElement(constants.ManagedByKueueLabel), "Pod should have finalizer")
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

// Create replacement pod with 'retriable-in-group' = false annotation
16 changes: 8 additions & 8 deletions test/integration/scheduler/scheduler_test.go
@@ -1721,11 +1721,11 @@ var _ = ginkgo.Describe("Scheduler", func() {
util.ExpectWorkloadsToHaveQuotaReservation(ctx, k8sClient, strictFIFOClusterQ.Name, wl1)
util.ExpectWorkloadsToBePending(ctx, k8sClient, wl2)
// wl3 doesn't even get a scheduling attempt, so can't check for conditions.
-gomega.Consistently(func() bool {
+gomega.Consistently(func(g gomega.Gomega) {
lookupKey := types.NamespacedName{Name: wl3.Name, Namespace: wl3.Namespace}
-	gomega.Expect(k8sClient.Get(ctx, lookupKey, wl3)).Should(gomega.Succeed())
-	return !workload.HasQuotaReservation(wl3)
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeTrue())
+	g.Expect(k8sClient.Get(ctx, lookupKey, wl3)).Should(gomega.Succeed())
+	g.Expect(workload.HasQuotaReservation(wl3)).Should(gomega.BeFalse())
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
util.ExpectPendingWorkloadsMetric(strictFIFOClusterQ, 2, 0)
util.ExpectReservingActiveWorkloadsMetric(strictFIFOClusterQ, 1)
util.ExpectAdmittedWorkloadsTotalMetric(strictFIFOClusterQ, 1)
@@ -1860,11 +1860,11 @@ var _ = ginkgo.Describe("Scheduler", func() {

ginkgo.By("Delete clusterQueue")
gomega.Expect(util.DeleteObject(ctx, k8sClient, cq)).To(gomega.Succeed())
-gomega.Consistently(func() []string {
+gomega.Consistently(func(g gomega.Gomega) {
var newCQ kueue.ClusterQueue
-	gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &newCQ)).To(gomega.Succeed())
-	return newCQ.GetFinalizers()
-}, util.ConsistentDuration, util.Interval).Should(gomega.Equal([]string{kueue.ResourceInUseFinalizerName}))
+	g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &newCQ)).To(gomega.Succeed())
+	g.Expect(newCQ.GetFinalizers()).Should(gomega.Equal([]string{kueue.ResourceInUseFinalizerName}))
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())

ginkgo.By("New created workloads should be frozen")
wl2 := testing.MakeWorkload("workload2", ns.Name).Queue(queue.Name).Obj()
26 changes: 11 additions & 15 deletions test/integration/scheduler/workload_controller_test.go
@@ -639,11 +639,11 @@ var _ = ginkgo.Describe("Workload controller with scheduler", func() {
Obj()
gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

-gomega.Consistently(func(g gomega.Gomega) bool {
+gomega.Consistently(func(g gomega.Gomega) {
read := kueue.Workload{}
g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read)).Should(gomega.Succeed())
-	return workload.HasQuotaReservation(&read)
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeFalse())
+	g.Expect(workload.HasQuotaReservation(&read)).Should(gomega.BeFalse())
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("Verify resourceRequests is not transformed", func() {
@@ -706,13 +706,11 @@ var _ = ginkgo.Describe("Workload controller with scheduler", func() {
Obj()
gomega.Expect(k8sClient.Create(ctx, wl2)).To(gomega.Succeed())

-gomega.Consistently(func() bool {
+gomega.Consistently(func(g gomega.Gomega) {
read := kueue.Workload{}
-	if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read); err != nil {
-		return false
-	}
-	return workload.HasQuotaReservation(&read)
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeFalse())
+	g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read)).Should(gomega.Succeed())
+	g.Expect(workload.HasQuotaReservation(&read)).Should(gomega.BeFalse())
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("Decreasing the runtimeClass", func() {
@@ -793,13 +791,11 @@ var _ = ginkgo.Describe("Workload controller with scheduler", func() {
Obj()
gomega.Expect(k8sClient.Create(ctx, wl2)).To(gomega.Succeed())

-gomega.Consistently(func() bool {
+gomega.Consistently(func(g gomega.Gomega) {
read := kueue.Workload{}
-	if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read); err != nil {
-		return false
-	}
-	return workload.HasQuotaReservation(&read)
-}, util.ConsistentDuration, util.Interval).Should(gomega.BeFalse())
+	g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read)).Should(gomega.Succeed())
+	g.Expect(workload.HasQuotaReservation(&read)).Should(gomega.BeFalse())
+}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("Decreasing the limit's default", func() {
