Skip to content

Commit

Permalink
e2e: centralize utilities
Browse files Browse the repository at this point in the history
Move the utility functions into the main suite file,
to foster code reuse.

Trivial code movement, no expected changes in behavior.

Signed-off-by: Francesco Romani <[email protected]>
  • Loading branch information
ffromani committed Mar 5, 2024
1 parent c2309ee commit 1a8759d
Show file tree
Hide file tree
Showing 2 changed files with 114 additions and 110 deletions.
116 changes: 114 additions & 2 deletions test/e2e/e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,22 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"

"github.com/go-logr/logr"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/yaml"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
k8swait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"

"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
Expand All @@ -45,8 +50,14 @@ import (

"github.com/k8stopologyawareschedwg/deployer/pkg/clientutil"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/platform"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/wait"
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests"
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests/sched"
"github.com/k8stopologyawareschedwg/deployer/pkg/options"
"github.com/k8stopologyawareschedwg/deployer/pkg/stringify"
"github.com/k8stopologyawareschedwg/deployer/pkg/validator"
e2epods "github.com/k8stopologyawareschedwg/deployer/test/e2e/utils/pods"
)

var (
Expand Down Expand Up @@ -132,7 +143,7 @@ func getNodeResourceTopology(tc topologyclientset.Interface, name string, filter
var err error
var nrt *v1alpha2.NodeResourceTopology
fmt.Fprintf(ginkgo.GinkgoWriter, "looking for noderesourcetopology %q\n", name)
err = wait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) {
err = k8swait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) {
nrt, err = tc.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
Expand Down Expand Up @@ -255,3 +266,104 @@ func NullEnv() *deployer.Environment {
}
return &env
}

// dumpSchedulerPods logs, for debugging purposes, the full YAML of every
// pod backing the scheduler plugin deployment, followed by the events
// recorded for each pod. Any setup failure aborts the current spec.
func dumpSchedulerPods() {
	nsObj, err := manifests.Namespace(manifests.ComponentSchedulerPlugin)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	// TODO: autodetect the platform
	schedMfs, err := sched.GetManifests(platform.Kubernetes, nsObj.Name)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	schedMfs, err = schedMfs.Render(logr.Discard(), options.Scheduler{
		Replicas: int32(1),
	})
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	ctrlCli, err := clientutil.New()
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	clientSet, err := clientutil.NewK8s()
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	ctx := context.Background()

	schedPods, err := e2epods.GetByDeployment(ctrlCli, ctx, *schedMfs.DPScheduler)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	klog.Warning(">>> scheduler pod status begin:\n")
	for i := 0; i < len(schedPods); i++ {
		pod := &schedPods[i]

		// TODO
		// strip the managed fields: they are noise in a debug dump
		pod.ManagedFields = nil
		// TODO

		podYAML, err := yaml.Marshal(pod)
		gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

		klog.Warningf("%s\n---\n", string(podYAML))

		e2epods.LogEventsForPod(clientSet, ctx, pod.Namespace, pod.Name)
		klog.Warningf("---\n")
	}
	klog.Warning(">>> scheduler pod status end\n")
}

// expectSchedulerRunning asserts that the scheduler plugin is fully
// operational: its ConfigMap exists and carries exactly one profile with
// a cache resync period, and both the scheduler and controller
// deployments reach completion. Any failed expectation aborts the spec.
func expectSchedulerRunning() {
	ns, err := manifests.Namespace(manifests.ComponentSchedulerPlugin)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	cli, err := clientutil.New()
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	ctx := context.Background()

	ginkgo.By("checking that scheduler plugin is configured")

	confMap := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.Name,
			Name:      "scheduler-config", // TODO: duplicate from YAML
		},
	}
	err = cli.Get(ctx, client.ObjectKeyFromObject(&confMap), &confMap)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	gomega.ExpectWithOffset(1, confMap.Data).ToNot(gomega.BeNil(), "empty config map for scheduler config")

	data, ok := confMap.Data[manifests.SchedulerConfigFileName]
	gomega.ExpectWithOffset(1, ok).To(gomega.BeTrue(), "empty config data for %q", manifests.SchedulerConfigFileName)

	allParams, err := manifests.DecodeSchedulerProfilesFromData([]byte(data))
	// check the decode error BEFORE touching allParams: on failure the
	// slice may be empty and the length assertion below would report a
	// misleading "unexpected params" instead of the real decode error.
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	gomega.ExpectWithOffset(1, len(allParams)).To(gomega.Equal(1), "unexpected params: %#v", allParams)

	params := allParams[0] // TODO: smarter find
	gomega.ExpectWithOffset(1, params.Cache).ToNot(gomega.BeNil(), "no data for scheduler cache config")
	gomega.ExpectWithOffset(1, params.Cache.ResyncPeriodSeconds).ToNot(gomega.BeNil(), "no data for scheduler cache resync period")

	ginkgo.By("checking that scheduler plugin is running")

	ginkgo.By("checking that topo-aware-scheduler pod is running")
	// TODO: autodetect the platform
	mfs, err := sched.GetManifests(platform.Kubernetes, ns.Name)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	mfs, err = mfs.Render(logr.Discard(), options.Scheduler{
		Replicas: int32(1),
	})
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	// wait for both deployments in parallel; each goroutine uses its own
	// err so they do not race on the enclosing variable.
	var wg sync.WaitGroup
	for _, dp := range []*appsv1.Deployment{
		mfs.DPScheduler,
		mfs.DPController,
	} {
		wg.Add(1)
		go func(dp *appsv1.Deployment) {
			defer ginkgo.GinkgoRecover()
			defer wg.Done()
			// was `_, err =` capturing the outer err: a data race under
			// -race, and one goroutine's failure could be clobbered by
			// another's success before being asserted.
			_, err := wait.With(cli, logr.Discard()).Interval(10*time.Second).Timeout(3*time.Minute).ForDeploymentComplete(ctx, dp)
			gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
		}(dp)
	}
	wg.Wait()
}
108 changes: 0 additions & 108 deletions test/e2e/positive.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"time"

"github.com/go-logr/logr"
Expand All @@ -33,13 +32,8 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"

"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"

"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"

Expand All @@ -48,7 +42,6 @@ import (
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/platform"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/platform/detect"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/updaters"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/wait"
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests"
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests/nfd"
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests/rte"
Expand Down Expand Up @@ -506,104 +499,3 @@ var _ = ginkgo.Describe("[PositiveFlow] Deployer partial execution", func() {
})
})
})

// dumpSchedulerPods logs, for debugging purposes, the full YAML of every
// pod backing the scheduler plugin deployment, followed by the events
// recorded for each pod. Any setup failure aborts the current spec.
func dumpSchedulerPods() {
	nsObj, err := manifests.Namespace(manifests.ComponentSchedulerPlugin)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	// TODO: autodetect the platform
	schedMfs, err := sched.GetManifests(platform.Kubernetes, nsObj.Name)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	schedMfs, err = schedMfs.Render(logr.Discard(), options.Scheduler{
		Replicas: int32(1),
	})
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	ctrlCli, err := clientutil.New()
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	clientSet, err := clientutil.NewK8s()
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	ctx := context.Background()

	schedPods, err := e2epods.GetByDeployment(ctrlCli, ctx, *schedMfs.DPScheduler)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	klog.Warning(">>> scheduler pod status begin:\n")
	for i := 0; i < len(schedPods); i++ {
		pod := &schedPods[i]

		// TODO
		// strip the managed fields: they are noise in a debug dump
		pod.ManagedFields = nil
		// TODO

		podYAML, err := yaml.Marshal(pod)
		gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

		klog.Warningf("%s\n---\n", string(podYAML))

		e2epods.LogEventsForPod(clientSet, ctx, pod.Namespace, pod.Name)
		klog.Warningf("---\n")
	}
	klog.Warning(">>> scheduler pod status end\n")
}

// expectSchedulerRunning asserts that the scheduler plugin is fully
// operational: its ConfigMap exists and carries exactly one profile with
// a cache resync period, and both the scheduler and controller
// deployments reach completion. Any failed expectation aborts the spec.
func expectSchedulerRunning() {
	ns, err := manifests.Namespace(manifests.ComponentSchedulerPlugin)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	cli, err := clientutil.New()
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	ctx := context.Background()

	ginkgo.By("checking that scheduler plugin is configured")

	confMap := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.Name,
			Name:      "scheduler-config", // TODO: duplicate from YAML
		},
	}
	err = cli.Get(ctx, client.ObjectKeyFromObject(&confMap), &confMap)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	gomega.ExpectWithOffset(1, confMap.Data).ToNot(gomega.BeNil(), "empty config map for scheduler config")

	data, ok := confMap.Data[manifests.SchedulerConfigFileName]
	gomega.ExpectWithOffset(1, ok).To(gomega.BeTrue(), "empty config data for %q", manifests.SchedulerConfigFileName)

	allParams, err := manifests.DecodeSchedulerProfilesFromData([]byte(data))
	// check the decode error BEFORE touching allParams: on failure the
	// slice may be empty and the length assertion below would report a
	// misleading "unexpected params" instead of the real decode error.
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	gomega.ExpectWithOffset(1, len(allParams)).To(gomega.Equal(1), "unexpected params: %#v", allParams)

	params := allParams[0] // TODO: smarter find
	gomega.ExpectWithOffset(1, params.Cache).ToNot(gomega.BeNil(), "no data for scheduler cache config")
	gomega.ExpectWithOffset(1, params.Cache.ResyncPeriodSeconds).ToNot(gomega.BeNil(), "no data for scheduler cache resync period")

	ginkgo.By("checking that scheduler plugin is running")

	ginkgo.By("checking that topo-aware-scheduler pod is running")
	// TODO: autodetect the platform
	mfs, err := sched.GetManifests(platform.Kubernetes, ns.Name)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
	mfs, err = mfs.Render(logr.Discard(), options.Scheduler{
		Replicas: int32(1),
	})
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	// wait for both deployments in parallel; each goroutine uses its own
	// err so they do not race on the enclosing variable.
	var wg sync.WaitGroup
	for _, dp := range []*appsv1.Deployment{
		mfs.DPScheduler,
		mfs.DPController,
	} {
		wg.Add(1)
		go func(dp *appsv1.Deployment) {
			defer ginkgo.GinkgoRecover()
			defer wg.Done()
			// was `_, err =` capturing the outer err: a data race under
			// -race, and one goroutine's failure could be clobbered by
			// another's success before being asserted.
			_, err := wait.With(cli, logr.Discard()).Interval(10*time.Second).Timeout(3*time.Minute).ForDeploymentComplete(ctx, dp)
			gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())
		}(dp)
	}
	wg.Wait()
}

0 comments on commit 1a8759d

Please sign in to comment.