From 61c94c4978dd8257a6e690b57df2a6f53dc8781b Mon Sep 17 00:00:00 2001 From: Sargun Narula <57625241+SargunNarula@users.noreply.github.com> Date: Thu, 19 Dec 2024 11:27:35 +0530 Subject: [PATCH] E2E: Add hypershift support to container runtime config tests (#1229) Revert: ContainerRuntimeConfig handling for test purposes This reverts the removal of ContainerRuntimeConfig handling in the switch case, which inadvertently impacted e2e tests. While the deletion was correct from the controller's perspective (as we no longer had to interact with ContainerRuntimeConfig), the function `GetObjectConfigMapDataKey` is still used for testing purposes. To ensure e2e tests function correctly and to continue testing the effects of ContainerRuntimeConfig, this change is being reverted. Signed-off-by: Sargun Narula --- .../hypershift/hypershift.go | 3 +- .../2_performance_update/updating_profile.go | 85 ++++++++++++++----- .../functests/utils/nodepools/nodepools.go | 48 +++++++++++ 3 files changed, 114 insertions(+), 22 deletions(-) diff --git a/pkg/performanceprofile/controller/performanceprofile/hypershift/hypershift.go b/pkg/performanceprofile/controller/performanceprofile/hypershift/hypershift.go index 7809a49434..2ca0d89388 100644 --- a/pkg/performanceprofile/controller/performanceprofile/hypershift/hypershift.go +++ b/pkg/performanceprofile/controller/performanceprofile/hypershift/hypershift.go @@ -103,7 +103,8 @@ func GetObjectConfigMapDataKey(obj runtime.Object) string { case *performancev2.PerformanceProfile, *performancev2.PerformanceProfileList, *tunedv1.Tuned, *tunedv1.TunedList: return hypershiftconsts.TuningKey case *machineconfigv1.KubeletConfig, *machineconfigv1.KubeletConfigList, - *machineconfigv1.MachineConfig, *machineconfigv1.MachineConfigList: + *machineconfigv1.MachineConfig, *machineconfigv1.MachineConfigList, + *machineconfigv1.ContainerRuntimeConfig, *machineconfigv1.ContainerRuntimeConfigList: return hypershiftconsts.ConfigKey default: return "" diff --git a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go index 89d3a4c0ae..bf02c77ead 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go @@ -52,6 +52,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance var workerRTNodes []corev1.Node var profile, initialProfile *performancev2.PerformanceProfile var poolName string + var np *hypershiftv1beta1.NodePool var err error chkCmdLine := []string{"cat", "/proc/cmdline"} @@ -434,7 +435,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance By("Creating new MachineConfigPool") mcp = mcps.New(newRole, newNodeSelector) - err = testclient.ControlPlaneClient.Create(context.TODO(), mcp) + err = testclient.Client.Create(context.TODO(), mcp) Expect(err).ToNot(HaveOccurred()) By("Updating Node Selector performance profile") @@ -1152,8 +1153,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance }) }) - // TODO - Need specific container runtime utility for Hypershift - Context("ContainerRuntimeConfig", Ordered, Label(string(label.Tier2), string(label.OpenShift)), func() { + Context("ContainerRuntimeConfig", Ordered, Label(string(label.Tier2)), func() { var ctrcfg *machineconfigv1.ContainerRuntimeConfig const ContainerRuntimeConfigName = "ctrcfg-test" mcp := 
&machineconfigv1.MachineConfigPool{} @@ -1162,12 +1162,16 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance key := types.NamespacedName{ Name: poolName, } - Expect(testclient.Client.Get(context.TODO(), key, mcp)).ToNot(HaveOccurred(), "cannot get MCP %q", poolName) By("checking if ContainerRuntimeConfig object already exists") - ctrcfg, err = getContainerRuntimeConfigFrom(context.TODO(), profile, mcp) - Expect(err).ToNot(HaveOccurred(), "failed to get ContainerRuntimeConfig from profile %q mcp %q", profile.Name, mcp.Name) - if ctrcfg != nil { - Skip(fmt.Sprintf("ContainerRuntimeConfig %s exist in the cluster and not expected", ctrcfg.Name)) + if !hypershift.IsHypershiftCluster() { + Expect(testclient.ControlPlaneClient.Get(context.TODO(), key, mcp)).ToNot(HaveOccurred(), "cannot get MCP %q", poolName) + ctrcfg, err = getContainerRuntimeConfigFrom(context.TODO(), profile, mcp) + Expect(err).ToNot(HaveOccurred(), "failed to get ContainerRuntimeConfig from mcp %q", mcp.Name) + Expect(ctrcfg).To(BeNil(), "ContainerRuntimeConfig should not exist for MCP %q", mcp.Name) + } else { + ctrcfg, err = getContainerRuntimeConfigFrom(context.TODO(), profile, mcp) + Expect(err).ToNot(HaveOccurred(), "failed to get ContainerRuntimeConfig from profile %q", profile.Name) + Expect(ctrcfg).To(BeNil(), "ContainerRuntimeConfig should not exist for profile %q", profile.Name) } testpodTemplate = pods.GetTestPod() testpodTemplate.Namespace = testutils.NamespaceTesting @@ -1179,28 +1183,63 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance var expectedRuntime string if withCTRCfg { ctrcfg = newContainerRuntimeConfig(ContainerRuntimeConfigName, profile, mcp) - By(fmt.Sprintf("creating ContainerRuntimeConfig %q", ctrcfg.Name)) - Expect(testclient.Client.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg %#v", ctrcfg) + if hypershift.IsHypershiftCluster() { + By(fmt.Sprintf("creating ContainerRuntimeConfig configmap %q", ctrcfg.Name)) + Expect(testclient.ControlPlaneClient.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg configmap %#v", ctrcfg.Name) + + hostedClusterName, err := hypershift.GetHostedClusterName() + Expect(err).ToNot(HaveOccurred()) + np, err = nodepools.GetByClusterName(context.TODO(), testclient.ControlPlaneClient, hostedClusterName) + Expect(err).ToNot(HaveOccurred()) + + By("Attaching the Config object to the nodepool") + Expect(nodepools.AttachConfigObject(context.TODO(), testclient.ControlPlaneClient, ctrcfg)).To(Succeed()) + + By("Waiting for the nodepool configuration to start updating") + err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for the nodepool configuration to be ready") + err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) + Expect(err).ToNot(HaveOccurred()) + } else { + By(fmt.Sprintf("creating ContainerRuntimeConfig %q", ctrcfg.Name)) + Expect(testclient.ControlPlaneClient.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg %#v", ctrcfg) - DeferCleanup(func() { - Expect(testclient.Client.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg %#v", ctrcfg) By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName)) mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, 
getMCPConditionStatus) By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName)) mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus) + } + DeferCleanup(func() { + if hypershift.IsHypershiftCluster() { + By("Deattaching the Config object from the nodepool") + Expect(nodepools.DeattachConfigObject(context.TODO(), testclient.ControlPlaneClient, ctrcfg)).To(Succeed()) + + Expect(testclient.ControlPlaneClient.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg configmap %#v", ctrcfg) + + By("Waiting for the nodepool configuration to start updating") + err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for the nodepool configuration to be ready") + err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) + Expect(err).ToNot(HaveOccurred()) + } else { + Expect(testclient.ControlPlaneClient.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg %#v", ctrcfg) + By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName)) + mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus) + By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName)) + mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus) + } }) - - By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName)) - mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus) - By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName)) - mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus) } for i := 0; i < len(workerRTNodes); i++ { By("Determining the default container runtime used in the node") tunedPod, err := tuned.GetPod(context.TODO(), &workerRTNodes[i]) Expect(err).ToNot(HaveOccurred()) - expectedRuntime, err = runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.Client, tunedPod) + expectedRuntime, err = runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.DataPlaneClient, tunedPod) Expect(err).ToNot(HaveOccurred()) testlog.Infof("Container runtime used for the node: %s", expectedRuntime) @@ -1210,15 +1249,15 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance testpod.Spec.NodeName = workerRTNodes[i].Name testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNodes[i].Name} By(fmt.Sprintf("creating a test pod using high-performance runtime class on node %s", workerRTNodes[i].Name)) - Expect(testclient.Client.Create(context.TODO(), testpod)).ToNot(HaveOccurred()) + Expect(testclient.DataPlaneClient.Create(context.TODO(), testpod)).ToNot(HaveOccurred()) DeferCleanup(func() { By(fmt.Sprintf("deleting the test pod from node %s", workerRTNodes[i].Name)) - Expect(testclient.Client.Delete(context.TODO(), testpod)).ToNot(HaveOccurred()) + Expect(testclient.DataPlaneClient.Delete(context.TODO(), testpod)).ToNot(HaveOccurred()) Expect(pods.WaitForDeletion(context.TODO(), testpod, pods.DefaultDeletionTimeout*time.Second)).ToNot(HaveOccurred()) }) testpod, err = pods.WaitForCondition(context.TODO(), client.ObjectKeyFromObject(testpod), corev1.PodReady, 
corev1.ConditionTrue, 10*time.Minute) Expect(err).ToNot(HaveOccurred()) - runtimeType, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.Client, testpod) + runtimeType, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.DataPlaneClient, testpod) Expect(err).ToNot(HaveOccurred()) testlog.Infof("Container runtime used for the test pod: %s", runtimeType) Expect(runtimeType).To(Equal(expectedRuntime)) @@ -1324,6 +1363,10 @@ func removeLabels(nodeSelector map[string]string, targetNode *corev1.Node) { func newContainerRuntimeConfig(name string, profile *performancev2.PerformanceProfile, profileMCP *machineconfigv1.MachineConfigPool) *machineconfigv1.ContainerRuntimeConfig { return &machineconfigv1.ContainerRuntimeConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "ContainerRuntimeConfig", + APIVersion: machineconfigv1.GroupVersion.String(), + }, ObjectMeta: metav1.ObjectMeta{ Name: name, }, diff --git a/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go b/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go index 7fd6b510e7..fe61fb868e 100644 --- a/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go +++ b/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go @@ -81,6 +81,54 @@ func GetByClusterName(ctx context.Context, c client.Client, hostedClusterName st return np, nil } +func AttachConfigObject(ctx context.Context, cli client.Client, object client.Object) error { + np, err := GetNodePool(ctx, cli) + if err != nil { + return err + } + + return AttachConfigObjectToNodePool(ctx, cli, object, np) +} + +func AttachConfigObjectToNodePool(ctx context.Context, cli client.Client, object client.Object, np *hypershiftv1beta1.NodePool) error { + var err error + updatedConfig := []corev1.LocalObjectReference{{Name: object.GetName()}} + for i := range np.Spec.Config { + Config := np.Spec.Config[i] + if Config.Name != object.GetName() { + updatedConfig = append(updatedConfig, Config) + } + } + np.Spec.Config = updatedConfig + if err = cli.Update(ctx, np); err != nil { + return err + } + return nil +} + +func DeattachConfigObject(ctx context.Context, cli client.Client, object client.Object) error { + np, err := GetNodePool(ctx, cli) + if err != nil { + return err + } + + return DeattachConfigObjectFromNodePool(ctx, cli, object, np) +} + +func DeattachConfigObjectFromNodePool(ctx context.Context, cli client.Client, object client.Object, np *hypershiftv1beta1.NodePool) error { + var err error + for i := range np.Spec.Config { + if np.Spec.Config[i].Name == object.GetName() { + np.Spec.Config = append(np.Spec.Config[:i], np.Spec.Config[i+1:]...) + break + } + } + if err = cli.Update(ctx, np); err != nil { + return err + } + return nil +} + // AttachTuningObject is attaches a tuning object into the nodepool associated with the hosted-cluster // The function is idempotent func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object) error {
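
For context, a minimal sketch of how the reverted switch case is expected to be exercised: the e2e helpers depend on GetObjectConfigMapDataKey returning a non-empty data key for ContainerRuntimeConfig objects when they are embedded in a ConfigMap for a hosted (HyperShift) cluster. The test file below is illustrative only; the module and import paths are assumptions inferred from the file paths touched by this patch, not verified against the module.

// Hypothetical sketch, not part of this patch. Import paths are assumed from
// the directories shown above (pkg/performanceprofile/.../hypershift) and from
// the machineconfigv1 alias used in the diff.
package hypershift_test

import (
	"testing"

	machineconfigv1 "github.com/openshift/api/machineconfiguration/v1"

	"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/hypershift"
)

// After this revert, ContainerRuntimeConfig (and its List type) falls into the
// same switch case as MachineConfig/KubeletConfig, so the function returns the
// data key used for NodePool config ConfigMaps instead of the empty default.
func TestContainerRuntimeConfigHasConfigMapDataKey(t *testing.T) {
	ctrcfg := &machineconfigv1.ContainerRuntimeConfig{}
	if key := hypershift.GetObjectConfigMapDataKey(ctrcfg); key == "" {
		t.Fatalf("expected ContainerRuntimeConfig to map to the hosted-cluster config data key, got an empty string")
	}
}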