e2e:config: adapt suite for hypershift
Add the relevant code for configuring a performanceProfile over hypershift.

Signed-off-by: Talor Itzhak <[email protected]>
Tal-or committed May 19, 2024
1 parent a91f175 commit d150863
Showing 4 changed files with 149 additions and 37 deletions.
1 change: 1 addition & 0 deletions Makefile
@@ -257,6 +257,7 @@ pao-functests-mixedcpus:
pao-functests-hypershift:
@echo "Cluster Version"
hack/show-cluster-version.sh
hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config" -p "-vv -r --fail-fast --flake-attempts=2 --junit-report=report.xml" -m "Running Functional Tests over Hypershift"

.PHONY: cluster-clean-pao
cluster-clean-pao:
133 changes: 96 additions & 37 deletions test/e2e/performanceprofile/functests/0_config/config.go
@@ -21,6 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

mcv1 "github.com/openshift/api/machineconfiguration/v1"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"

performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components"
@@ -30,16 +31,18 @@ import (
testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
)

var RunningOnSingleNode bool

var _ = Describe("[performance][config] Performance configuration", Ordered, func() {

testutils.CustomBeforeAll(func() {
BeforeAll(func() {
isSNO, err := cluster.IsSingleNode()
Expect(err).ToNot(HaveOccurred())
RunningOnSingleNode = isSNO
@@ -64,19 +67,11 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, func() {
profileAlreadyExists = true
}

By("Getting MCP for profile")
mcpLabel := profile.GetMachineConfigLabel(performanceProfile)
key, value := components.GetFirstKeyAndValue(mcpLabel)
mcpsByLabel, err := mcps.GetByLabel(key, value)
Expect(err).ToNot(HaveOccurred(), "Failed getting MCP by label key %v value %v", key, value)
Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel)))
performanceMCP := &mcpsByLabel[0]

if !discovery.Enabled() {
By("Creating the PerformanceProfile")
// this might fail while the operator is still being deployed and the CRD does not exist yet
Eventually(func() error {
err := testclient.Client.Create(context.TODO(), performanceProfile)
err := testclient.ControlPlaneClient.Create(context.TODO(), performanceProfile)
if errors.IsAlreadyExists(err) {
testlog.Warning(fmt.Sprintf("A PerformanceProfile with name %s already exists! If created externally, tests might have unexpected behaviour", performanceProfile.Name))
profileAlreadyExists = true
@@ -85,36 +80,12 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, func() {
return err
}, cluster.ComputeTestTimeout(15*time.Minute, RunningOnSingleNode), 15*time.Second).ShouldNot(HaveOccurred(), "Failed creating the performance profile")
}

if !performanceMCP.Spec.Paused {
By("MCP is already unpaused")
} else {
By("Unpausing the MCP")
Expect(testclient.Client.Patch(context.TODO(), performanceMCP,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)),
),
)).ToNot(HaveOccurred(), "Failed unpausing MCP")
}

By("Waiting for the MCP to pick the PerformanceProfile's MC")
mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile)

// If the profile is already there, it's likely to have been through the updating phase, so we only
// wait for updated.
if !profileAlreadyExists {
By("Waiting for MCP starting to update")
mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
}
By("Waiting for MCP being updated")
mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)

Expect(testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(performanceProfile), performanceProfile))
unpauseMCP(context.TODO(), performanceProfile, profileAlreadyExists)
attachProfileToNodePool(context.TODO(), performanceProfile, profileAlreadyExists)
		Expect(testclient.ControlPlaneClient.Get(context.TODO(), client.ObjectKeyFromObject(performanceProfile), performanceProfile)).To(Succeed())
By("Printing the updated profile")
format.Object(performanceProfile, 2)
})

})

func externalPerformanceProfile(performanceManifest string) (*performancev2.PerformanceProfile, error) {
@@ -229,3 +200,91 @@ func testProfile() (*performancev2.PerformanceProfile, error) {
}
return profile, nil
}

func unpauseMCP(ctx context.Context, performanceProfile *performancev2.PerformanceProfile, profileAlreadyExists bool) {
GinkgoHelper()
// no MCPs on hypershift
if hypershift.IsHypershiftCluster() {
return
}
By("Getting MCP for profile")
mcpLabel := profile.GetMachineConfigLabel(performanceProfile)
key, value := components.GetFirstKeyAndValue(mcpLabel)
mcpsByLabel, err := mcps.GetByLabel(key, value)
Expect(err).ToNot(HaveOccurred(), "Failed getting MCP by label key %v value %v", key, value)
Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel)))
performanceMCP := &mcpsByLabel[0]

if !performanceMCP.Spec.Paused {
By("MCP is already unpaused")
} else {
By("Unpausing the MCP")
Expect(testclient.ControlPlaneClient.Patch(ctx, performanceMCP,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)),
),
)).ToNot(HaveOccurred(), "Failed unpausing MCP")
}

By("Waiting for the MCP to pick the PerformanceProfile's MC")
mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile)

// If the profile is already there, it's likely to have been through the updating phase, so we only
// wait for updated.
if !profileAlreadyExists {
By("Waiting for MCP starting to update")
mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
}
By("Waiting for MCP being updated")
mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
}

func attachProfileToNodePool(ctx context.Context, performanceProfile *performancev2.PerformanceProfile, profileAlreadyExists bool) {
GinkgoHelper()
	// no NodePools on non-hypershift clusters
if !hypershift.IsHypershiftCluster() {
return
}
// make debugging easier
printEnvs()
npList := &hypershiftv1beta1.NodePoolList{}
Expect(testclient.ControlPlaneClient.List(ctx, npList)).To(Succeed())
hostedClusterName, err := hypershift.GetHostedClusterName()
Expect(err).ToNot(HaveOccurred())
	var np *hypershiftv1beta1.NodePool
	for i := 0; i < len(npList.Items); i++ {
		// only take the NodePool that actually belongs to the hosted cluster;
		// otherwise np would end up pointing at the last list item regardless
		if npList.Items[i].Spec.ClusterName == hostedClusterName {
			np = &npList.Items[i]
			break
		}
	}
Expect(np).ToNot(BeNil(), "failed to find nodePool associated with cluster %q; existing nodePools are: %v", hostedClusterName, npList.Items)
np.Spec.TuningConfig = []corev1.LocalObjectReference{{Name: performanceProfile.Name}}
Expect(testclient.ControlPlaneClient.Update(ctx, np)).To(Succeed())
key := client.ObjectKeyFromObject(np)
// if the profile exists, don't wait for nodePool to get into updating state
if !profileAlreadyExists {
err = nodepools.WaitForUpdatingConfig(ctx, testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred(), "nodePool %q is not in UpdatingConfig state", key.String())
}
mngClusterNamespace, err := hypershift.GetManagementClusterNamespace()
Expect(err).ToNot(HaveOccurred())
	err = nodepools.WaitForConfigToBeReady(ctx, testclient.ControlPlaneClient, np.Name, mngClusterNamespace)
Expect(err).ToNot(HaveOccurred(), "nodePool %q config is not ready", key.String())
}
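
For reuse outside this suite, the heart of the hypershift path is the single tuningConfig reference on the NodePool. A minimal standalone sketch of that step, assuming a controller-runtime client whose scheme already includes the hypershift types (the helper name and package below are hypothetical, not part of this commit):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

// attachTuningConfig points a NodePool at a tuning object by name.
// On hypershift the profile is delivered through this reference rather
// than as a cluster-scoped CR rolled out by an MCP.
func attachTuningConfig(ctx context.Context, c client.Client, npKey client.ObjectKey, configName string) error {
	np := &hypershiftv1beta1.NodePool{}
	if err := c.Get(ctx, npKey, np); err != nil {
		return err
	}
	np.Spec.TuningConfig = []corev1.LocalObjectReference{{Name: configName}}
	return c.Update(ctx, np)
}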

func printEnvs() {
testlog.Info("Print hypershift CI info")
name, _ := os.LookupEnv(hypershift.HostedClusterNameEnv)
testlog.Infof("%s=%s", hypershift.HostedClusterNameEnv, name)

v, _ := hypershift.GetManagementClusterNamespace()
testlog.Infof("%s=%s", hypershift.ManagementClusterNamespaceEnv, v)

kcPath, _ := os.LookupEnv(hypershift.ManagementClusterKubeConfigEnv)
testlog.Infof("%s=%s", hypershift.ManagementClusterNamespaceEnv, kcPath)

kcPath, _ = os.LookupEnv(hypershift.HostedClusterKubeConfigEnv)
testlog.Infof("%s=%s", hypershift.HostedClusterKubeConfigEnv, kcPath)
}
10 changes: 10 additions & 0 deletions test/e2e/performanceprofile/functests/utils/client/clients.go
@@ -10,6 +10,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog"

apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"

@@ -23,6 +24,7 @@
performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
hypershiftutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

var (
@@ -70,6 +72,14 @@ func init() {
klog.Exit(err.Error())
}

if err := hypershiftv1beta1.AddToScheme(scheme.Scheme); err != nil {
klog.Exit(err.Error())
}

if err := apiv1beta1.AddToScheme(scheme.Scheme); err != nil {
klog.Exit(err.Error())
}

var err error
Client, err = newClient()
if err != nil {
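The scheme registrations added above are what let the shared test client work with the new types: controller-runtime resolves Go types to API groups and kinds through the scheme, so without AddToScheme every Get/List on a NodePool would fail with an "unregistered kind" style error. A minimal sketch of the same idea in isolation (function name hypothetical; kubeconfig discovery assumed to work via the standard controller-runtime config helper):

package example

import (
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"

	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

func newHypershiftAwareClient() (client.Client, error) {
	// Register the hypershift types once; afterwards the generic client
	// can encode and decode NodePool (and other hypershift) objects.
	if err := hypershiftv1beta1.AddToScheme(scheme.Scheme); err != nil {
		return nil, err
	}
	cfg, err := config.GetConfig()
	if err != nil {
		return nil, err
	}
	return client.New(cfg, client.Options{Scheme: scheme.Scheme})
}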
42 changes: 42 additions & 0 deletions test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go
@@ -0,0 +1,42 @@
package nodepools

import (
"context"
"time"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/controller-runtime/pkg/client"

hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

func WaitForUpdatingConfig(ctx context.Context, c client.Client, npName, namespace string) error {
	return waitForCondition(ctx, c, npName, namespace, func(cond hypershiftv1beta1.NodePoolCondition) (done bool, err error) {
if cond.Type == "UpdatingConfig" {
return cond.Status == corev1.ConditionTrue, nil
}
return false, nil
})
}

func WaitForConfigToBeReady(ctx context.Context, c client.Client, npName, namespace string) error {
	return waitForCondition(ctx, c, npName, namespace, func(cond hypershiftv1beta1.NodePoolCondition) (done bool, err error) {
if cond.Type == "UpdatingConfig" {
return cond.Status == corev1.ConditionFalse, nil
}
return false, nil
})
}

func waitForCondition(ctx context.Context, c client.Client, npName, namespace string, conditionFunc func(hypershiftv1beta1.NodePoolCondition) (done bool, err error)) error {
	return wait.PollUntilContextTimeout(ctx, time.Second*10, time.Minute*20, false, func(ctx context.Context) (done bool, err error) {
		np := &hypershiftv1beta1.NodePool{}
		key := client.ObjectKey{Name: npName, Namespace: namespace}
		if err := c.Get(ctx, key, np); err != nil {
			return false, err
		}
		// scan every condition; the predicate itself skips condition
		// types it does not care about
		for _, cond := range np.Status.Conditions {
			if done, err := conditionFunc(cond); done || err != nil {
				return done, err
			}
		}
		return false, nil
	})
}
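
A short usage sketch for these helpers (the wrapper name, NodePool name, and namespace are placeholders): a config rollout is observed as UpdatingConfig flipping to True and then back to False, and each helper polls every 10 seconds for up to 20 minutes.

package example

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
)

// waitForTuningRollout blocks until the named NodePool has both started
// and finished consuming a new tuningConfig.
func waitForTuningRollout(c client.Client, npName, namespace string) error {
	// outer bound slightly above the helpers' internal 20-minute window
	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Minute)
	defer cancel()
	if err := nodepools.WaitForUpdatingConfig(ctx, c, npName, namespace); err != nil {
		return err
	}
	return nodepools.WaitForConfigToBeReady(ctx, c, npName, namespace)
}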
