From d150863fcf1d4400dae0c9d0fa0ac17e67fc1708 Mon Sep 17 00:00:00 2001
From: Talor Itzhak
Date: Wed, 15 May 2024 18:33:40 +0300
Subject: [PATCH] e2e:config: adapt suite for hypershift

Adding the relevant code for configuring performanceProfile over
hypershift

Signed-off-by: Talor Itzhak
---
 Makefile                                   |   1 +
 .../functests/0_config/config.go           | 133 +++++++++++++-----
 .../functests/utils/client/clients.go      |  10 ++
 .../functests/utils/nodepools/nodepools.go |  42 ++++++
 4 files changed, 149 insertions(+), 37 deletions(-)
 create mode 100644 test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go

diff --git a/Makefile b/Makefile
index 86dd5e06d0..b178c5697d 100644
--- a/Makefile
+++ b/Makefile
@@ -257,6 +257,7 @@ pao-functests-mixedcpus:
 pao-functests-hypershift:
	@echo "Cluster Version"
	hack/show-cluster-version.sh
+	hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config" -p "-vv -r --fail-fast --flake-attempts=2 --junit-report=report.xml" -m "Running Functional Tests over Hypershift"
 
 .PHONY: cluster-clean-pao
 cluster-clean-pao:
diff --git a/test/e2e/performanceprofile/functests/0_config/config.go b/test/e2e/performanceprofile/functests/0_config/config.go
index c8c660ba99..9f284dc0a1 100644
--- a/test/e2e/performanceprofile/functests/0_config/config.go
+++ b/test/e2e/performanceprofile/functests/0_config/config.go
@@ -21,6 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	mcv1 "github.com/openshift/api/machineconfiguration/v1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 
 	performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
 	"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components"
@@ -30,8 +31,10 @@ import (
 	testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
 	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster"
 	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery"
+	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
 	testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
 	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps"
+	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
 	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
 )
 
@@ -39,7 +42,7 @@ var RunningOnSingleNode bool
 
 var _ = Describe("[performance][config] Performance configuration", Ordered, func() {
 
-	testutils.CustomBeforeAll(func() {
+	BeforeAll(func() {
 		isSNO, err := cluster.IsSingleNode()
 		Expect(err).ToNot(HaveOccurred())
 		RunningOnSingleNode = isSNO
@@ -64,19 +67,11 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, func() {
 			profileAlreadyExists = true
 		}
 
-		By("Getting MCP for profile")
-		mcpLabel := profile.GetMachineConfigLabel(performanceProfile)
-		key, value := components.GetFirstKeyAndValue(mcpLabel)
-		mcpsByLabel, err := mcps.GetByLabel(key, value)
-		Expect(err).ToNot(HaveOccurred(), "Failed getting MCP by label key %v value %v", key, value)
-		Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel)))
-		performanceMCP := &mcpsByLabel[0]
-
 		if !discovery.Enabled() {
 			By("Creating the PerformanceProfile")
 			// this might fail while the operator is still being deployed and the CRD does not exist yet
 			Eventually(func() error {
-				err := testclient.Client.Create(context.TODO(), performanceProfile)
+				err := testclient.ControlPlaneClient.Create(context.TODO(), performanceProfile)
 				if errors.IsAlreadyExists(err) {
 					testlog.Warning(fmt.Sprintf("A PerformanceProfile with name %s already exists! If created externally, tests might have unexpected behaviour", performanceProfile.Name))
 					profileAlreadyExists = true
@@ -85,36 +80,12 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, func() {
 				return err
 			}, cluster.ComputeTestTimeout(15*time.Minute, RunningOnSingleNode), 15*time.Second).ShouldNot(HaveOccurred(), "Failed creating the performance profile")
 		}
-
-		if !performanceMCP.Spec.Paused {
-			By("MCP is already unpaused")
-		} else {
-			By("Unpausing the MCP")
-			Expect(testclient.Client.Patch(context.TODO(), performanceMCP,
-				client.RawPatch(
-					types.JSONPatchType,
-					[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)),
-				),
-			)).ToNot(HaveOccurred(), "Failed unpausing MCP")
-		}
-
-		By("Waiting for the MCP to pick the PerformanceProfile's MC")
-		mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile)
-
-		// If the profile is already there, it's likely to have been through the updating phase, so we only
-		// wait for updated.
-		if !profileAlreadyExists {
-			By("Waiting for MCP starting to update")
-			mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
-		}
-		By("Waiting for MCP being updated")
-		mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
-
-		Expect(testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(performanceProfile), performanceProfile))
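+		// Exactly one of the two helpers below takes effect: unpauseMCP
+		// returns early on hypershift clusters, while attachProfileToNodePool
+		// returns early on standalone (MCP-based) clusters.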
%v", len(mcpsByLabel))) - performanceMCP := &mcpsByLabel[0] - if !discovery.Enabled() { By("Creating the PerformanceProfile") // this might fail while the operator is still being deployed and the CRD does not exist yet Eventually(func() error { - err := testclient.Client.Create(context.TODO(), performanceProfile) + err := testclient.ControlPlaneClient.Create(context.TODO(), performanceProfile) if errors.IsAlreadyExists(err) { testlog.Warning(fmt.Sprintf("A PerformanceProfile with name %s already exists! If created externally, tests might have unexpected behaviour", performanceProfile.Name)) profileAlreadyExists = true @@ -85,36 +80,12 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, fun return err }, cluster.ComputeTestTimeout(15*time.Minute, RunningOnSingleNode), 15*time.Second).ShouldNot(HaveOccurred(), "Failed creating the performance profile") } - - if !performanceMCP.Spec.Paused { - By("MCP is already unpaused") - } else { - By("Unpausing the MCP") - Expect(testclient.Client.Patch(context.TODO(), performanceMCP, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)), - ), - )).ToNot(HaveOccurred(), "Failed unpausing MCP") - } - - By("Waiting for the MCP to pick the PerformanceProfile's MC") - mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile) - - // If the profile is already there, it's likely to have been through the updating phase, so we only - // wait for updated. - if !profileAlreadyExists { - By("Waiting for MCP starting to update") - mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - } - By("Waiting for MCP being updated") - mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - Expect(testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(performanceProfile), performanceProfile)) + unpauseMCP(context.TODO(), performanceProfile, profileAlreadyExists) + attachProfileToNodePool(context.TODO(), performanceProfile, profileAlreadyExists) + Expect(testclient.ControlPlaneClient.Get(context.TODO(), client.ObjectKeyFromObject(performanceProfile), performanceProfile)) By("Printing the updated profile") format.Object(performanceProfile, 2) }) - }) func externalPerformanceProfile(performanceManifest string) (*performancev2.PerformanceProfile, error) { @@ -229,3 +200,91 @@ func testProfile() (*performancev2.PerformanceProfile, error) { } return profile, nil } + +func unpauseMCP(ctx context.Context, performanceProfile *performancev2.PerformanceProfile, profileAlreadyExists bool) { + GinkgoHelper() + // no MCPs on hypershift + if hypershift.IsHypershiftCluster() { + return + } + By("Getting MCP for profile") + mcpLabel := profile.GetMachineConfigLabel(performanceProfile) + key, value := components.GetFirstKeyAndValue(mcpLabel) + mcpsByLabel, err := mcps.GetByLabel(key, value) + Expect(err).ToNot(HaveOccurred(), "Failed getting MCP by label key %v value %v", key, value) + Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel))) + performanceMCP := &mcpsByLabel[0] + + if !performanceMCP.Spec.Paused { + By("MCP is already unpaused") + } else { + By("Unpausing the MCP") + Expect(testclient.ControlPlaneClient.Patch(ctx, performanceMCP, + client.RawPatch( + types.JSONPatchType, + []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)), + ), + )).ToNot(HaveOccurred(), "Failed unpausing MCP") 
+	By("Waiting for the MCP to pick the PerformanceProfile's MC")
+	mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile)
+
+	// If the profile is already there, it's likely to have been through the updating phase, so we only
+	// wait for updated.
+	if !profileAlreadyExists {
+		By("Waiting for MCP starting to update")
+		mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+	}
+	By("Waiting for MCP being updated")
+	mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+}
+
+func attachProfileToNodePool(ctx context.Context, performanceProfile *performancev2.PerformanceProfile, profileAlreadyExists bool) {
+	GinkgoHelper()
+	// no NodePools on non-hypershift clusters
+	if !hypershift.IsHypershiftCluster() {
+		return
+	}
+	// make debugging easier
+	printEnvs()
+	npList := &hypershiftv1beta1.NodePoolList{}
+	Expect(testclient.ControlPlaneClient.List(ctx, npList)).To(Succeed())
+	hostedClusterName, err := hypershift.GetHostedClusterName()
+	Expect(err).ToNot(HaveOccurred())
+	// np stays nil unless a NodePool actually matches the hosted cluster
+	var np *hypershiftv1beta1.NodePool
+	for i := 0; i < len(npList.Items); i++ {
+		if npList.Items[i].Spec.ClusterName == hostedClusterName {
+			np = &npList.Items[i]
+			break
+		}
+	}
+	Expect(np).ToNot(BeNil(), "failed to find nodePool associated with cluster %q; existing nodePools are: %v", hostedClusterName, npList.Items)
+	np.Spec.TuningConfig = []corev1.LocalObjectReference{{Name: performanceProfile.Name}}
+	Expect(testclient.ControlPlaneClient.Update(ctx, np)).To(Succeed())
+	key := client.ObjectKeyFromObject(np)
+	// if the profile already exists, don't wait for the nodePool to get into the updating state
+	if !profileAlreadyExists {
+		err = nodepools.WaitForUpdatingConfig(ctx, testclient.ControlPlaneClient, np.Name, np.Namespace)
+		Expect(err).ToNot(HaveOccurred(), "nodePool %q is not in UpdatingConfig state", key.String())
+	}
+	mngClusterNamespace, err := hypershift.GetManagementClusterNamespace()
+	Expect(err).ToNot(HaveOccurred())
+	err = nodepools.WaitForConfigToBeReady(ctx, testclient.ControlPlaneClient, np.Name, mngClusterNamespace)
+	Expect(err).ToNot(HaveOccurred(), "nodePool %q config is not ready", key.String())
+}
+
+func printEnvs() {
+	testlog.Info("Print hypershift CI info")
+	name, _ := os.LookupEnv(hypershift.HostedClusterNameEnv)
+	testlog.Infof("%s=%s", hypershift.HostedClusterNameEnv, name)
+
+	v, _ := hypershift.GetManagementClusterNamespace()
+	testlog.Infof("%s=%s", hypershift.ManagementClusterNamespaceEnv, v)
+
+	kcPath, _ := os.LookupEnv(hypershift.ManagementClusterKubeConfigEnv)
+	testlog.Infof("%s=%s", hypershift.ManagementClusterKubeConfigEnv, kcPath)
+
+	kcPath, _ = os.LookupEnv(hypershift.HostedClusterKubeConfigEnv)
+	testlog.Infof("%s=%s", hypershift.HostedClusterKubeConfigEnv, kcPath)
+}
diff --git a/test/e2e/performanceprofile/functests/utils/client/clients.go b/test/e2e/performanceprofile/functests/utils/client/clients.go
index 48451897a6..e5210875cf 100644
--- a/test/e2e/performanceprofile/functests/utils/client/clients.go
+++ b/test/e2e/performanceprofile/functests/utils/client/clients.go
@@ -10,6 +10,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/klog"
 
+	apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/config"
 
@@ -23,6 +24,7 @@ import (
 	performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
 	hypershiftutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
 	testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 )
 
 var (
@@ -70,6 +72,14 @@ func init() {
 		klog.Exit(err.Error())
 	}
 
+	if err := hypershiftv1beta1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := apiv1beta1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
 	var err error
 	Client, err = newClient()
 	if err != nil {
diff --git a/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go b/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go
new file mode 100644
index 0000000000..e063194f55
--- /dev/null
+++ b/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go
@@ -0,0 +1,42 @@
+package nodepools
+
+import (
+	"context"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+)
+
+func WaitForUpdatingConfig(ctx context.Context, c client.Client, npName, namespace string) error {
+	return waitForCondition(ctx, c, npName, namespace, func(cond hypershiftv1beta1.NodePoolCondition) (done bool, err error) {
+		if cond.Type == "UpdatingConfig" {
+			return cond.Status == corev1.ConditionTrue, nil
+		}
+		return false, nil
+	})
+}
+
+func WaitForConfigToBeReady(ctx context.Context, c client.Client, npName, namespace string) error {
+	return waitForCondition(ctx, c, npName, namespace, func(cond hypershiftv1beta1.NodePoolCondition) (done bool, err error) {
+		if cond.Type == "UpdatingConfig" {
+			return cond.Status == corev1.ConditionFalse, nil
+		}
+		return false, nil
+	})
+}
+
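+// waitForCondition polls the NodePool every 10 seconds, for up to 20 minutes,
+// until conditionFunc reports done (or an error) for one of the pool's
+// status conditions.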
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" ) var ( @@ -70,6 +72,14 @@ func init() { klog.Exit(err.Error()) } + if err := hypershiftv1beta1.AddToScheme(scheme.Scheme); err != nil { + klog.Exit(err.Error()) + } + + if err := apiv1beta1.AddToScheme(scheme.Scheme); err != nil { + klog.Exit(err.Error()) + } + var err error Client, err = newClient() if err != nil { diff --git a/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go b/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go new file mode 100644 index 0000000000..e063194f55 --- /dev/null +++ b/test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go @@ -0,0 +1,42 @@ +package nodepools + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +func WaitForUpdatingConfig(ctx context.Context, c client.Client, NpName, namespace string) error { + return waitForCondition(ctx, c, NpName, namespace, func(cond hypershiftv1beta1.NodePoolCondition) (done bool, err error) { + if cond.Type == "UpdatingConfig" { + return cond.Status == corev1.ConditionTrue, nil + } + return false, nil + }) +} + +func WaitForConfigToBeReady(ctx context.Context, c client.Client, NpName, namespace string) error { + return waitForCondition(ctx, c, NpName, namespace, func(cond hypershiftv1beta1.NodePoolCondition) (done bool, err error) { + if cond.Type == "UpdatingConfig" { + return cond.Status == corev1.ConditionFalse, nil + } + return false, nil + }) +} + +func waitForCondition(ctx context.Context, c client.Client, NpName, namespace string, conditionFunc func(hypershiftv1beta1.NodePoolCondition) (done bool, err error)) error { + return wait.PollUntilContextTimeout(ctx, time.Second*10, time.Minute*20, false, func(ctx context.Context) (done bool, err error) { + np := &hypershiftv1beta1.NodePool{} + key := client.ObjectKey{Name: NpName, Namespace: namespace} + err = c.Get(ctx, key, np) + for _, cond := range np.Status.Conditions { + return conditionFunc(cond) + } + return false, nil + }) +}