E2E: Add hypershift support to container runtime config tests
Signed-off-by: Sargun Narula <[email protected]>
SargunNarula committed Dec 4, 2024
1 parent 350b36b commit 5d7fe67
Showing 2 changed files with 141 additions and 25 deletions.
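In short, the test now branches on cluster topology: on a HyperShift hosted cluster the ContainerRuntimeConfig is serialized into a ConfigMap and attached to the hosted cluster's NodePool, while on standalone OpenShift it is created directly and the MachineConfigPool rollout is awaited. The sketch below condenses that flow; applyContainerRuntimeConfig is a hypothetical wrapper (not part of this commit), imports, Ginkgo assertions, and the rollout waits are omitted, and it assumes the test-package helpers introduced here (containerRuntimeConfigConfigMap, newContainerRuntimeConfig, nodepools.AttachConfigObject, hypershift.IsHypershiftCluster).

    // Hypothetical condensed flow, assuming the helpers added in this commit.
    func applyContainerRuntimeConfig(ctx context.Context, profile *performancev2.PerformanceProfile, mcp *machineconfigv1.MachineConfigPool) error {
        if hypershift.IsHypershiftCluster() {
            // Hosted clusters: wrap the ContainerRuntimeConfig in a ConfigMap and reference it from the NodePool.
            cm, err := containerRuntimeConfigConfigMap("ctrcfg-test", profile, mcp)
            if err != nil {
                return err
            }
            if err := testclient.ControlPlaneClient.Create(ctx, cm); err != nil {
                return err
            }
            return nodepools.AttachConfigObject(ctx, testclient.ControlPlaneClient, cm)
        }
        // Standalone clusters: create the ContainerRuntimeConfig directly; the MCO rolls it out through the MCP.
        ctrcfg := newContainerRuntimeConfig("ctrcfg-test", profile, mcp)
        return testclient.ControlPlaneClient.Create(ctx, ctrcfg)
    }

On HyperShift the rollout is then tracked with nodepools.WaitForUpdatingConfig and nodepools.WaitForConfigToBeReady instead of MachineConfigPool conditions.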
@@ -22,6 +22,7 @@ import (
"k8s.io/utils/cpuset"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"

machineconfigv1 "github.com/openshift/api/machineconfiguration/v1"
performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
@@ -51,6 +52,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
var workerRTNodes []corev1.Node
var profile, initialProfile *performancev2.PerformanceProfile
var poolName string
var np *hypershiftv1beta1.NodePool
var err error

chkCmdLine := []string{"cat", "/proc/cmdline"}
@@ -432,7 +434,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
// Remove the MCP selector if it exists
if profile.Spec.MachineConfigPoolSelector != nil {
By("Removing Machine Config Selector")
Expect(testclient.Client.Patch(context.TODO(), profile,
Expect(testclient.ControlPlaneClient.Patch(context.TODO(), profile,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{"op": "remove", "path": "/spec/%s"}]`, "machineConfigPoolSelector")),
@@ -451,7 +453,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
Expect(err).ToNot(HaveOccurred())

By("Applying changes in performance profile and waiting until mcp will start updating")
Expect(testclient.Client.Patch(context.TODO(), profile,
Expect(testclient.ControlPlaneClient.Patch(context.TODO(), profile,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
@@ -509,7 +511,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
nodeSelector := strings.Join(selectorLabels, ",")
profile.Spec.NodeSelector = oldNodeSelector
spec, err := json.Marshal(profile.Spec)
Expect(testclient.Client.Patch(context.TODO(), profile,
Expect(testclient.ControlPlaneClient.Patch(context.TODO(), profile,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
@@ -522,7 +524,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
Name: profile.Name,
Namespace: profile.Namespace,
}
Expect(testclient.Client.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
Expect(testclient.ControlPlaneClient.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
var updatedSelectorLabels []string
for k, v := range updatedProfile.Spec.NodeSelector {
updatedSelectorLabels = append(updatedSelectorLabels, fmt.Sprintf(`"%s":"%s"`, k, v))
@@ -533,7 +535,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance

poolName, err = mcps.GetByProfile(updatedProfile)
Expect(err).ToNot(HaveOccurred())
Expect(testclient.Client.Delete(context.TODO(), mcp)).ToNot(HaveOccurred())
Expect(testclient.ControlPlaneClient.Delete(context.TODO(), mcp)).ToNot(HaveOccurred())
mcps.WaitForCondition(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)

// revert node label to have the expected value
@@ -548,7 +550,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
spec, err := json.Marshal(profile.Spec)
Expect(err).ToNot(HaveOccurred())

Expect(testclient.Client.Patch(context.TODO(), profile,
Expect(testclient.ControlPlaneClient.Patch(context.TODO(), profile,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
@@ -1152,8 +1154,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
})
})

// TODO - Need specific container runtime utility for Hypershift
Context("ContainerRuntimeConfig", Ordered, Label(string(label.Tier2), string(label.OpenShift)), func() {
Context("ContainerRuntimeConfig", Ordered, Label(string(label.Tier2)), func() {
var ctrcfg *machineconfigv1.ContainerRuntimeConfig
const ContainerRuntimeConfigName = "ctrcfg-test"
mcp := &machineconfigv1.MachineConfigPool{}
@@ -1162,7 +1163,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
key := types.NamespacedName{
Name: poolName,
}
Expect(testclient.Client.Get(context.TODO(), key, mcp)).ToNot(HaveOccurred(), "cannot get MCP %q", poolName)
if !hypershift.IsHypershiftCluster() {
Expect(testclient.ControlPlaneClient.Get(context.TODO(), key, mcp)).ToNot(HaveOccurred(), "cannot get MCP %q", poolName)
}
By("checking if ContainerRuntimeConfig object already exists")
ctrcfg, err = getContainerRuntimeConfigFrom(context.TODO(), profile, mcp)
Expect(err).ToNot(HaveOccurred(), "failed to get ContainerRuntimeConfig from profile %q mcp %q", profile.Name, mcp.Name)
@@ -1178,29 +1181,66 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
func(withCTRCfg bool) {
var expectedRuntime string
if withCTRCfg {
ctrcfg = newContainerRuntimeConfig(ContainerRuntimeConfigName, profile, mcp)
By(fmt.Sprintf("creating ContainerRuntimeConfig %q", ctrcfg.Name))
Expect(testclient.Client.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg %#v", ctrcfg)
if hypershift.IsHypershiftCluster() {
ctrcfgConfigMap, err := containerRuntimeConfigConfigMap(ContainerRuntimeConfigName, profile, mcp)
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("creating ContainerRuntimeConfig configmap %q", ctrcfg_configmap.Name))
Expect(testclient.ControlPlaneClient.Create(context.TODO(), ctrcfg_configmap)).ToNot(HaveOccurred(), "failed to create ctrcfg configmap %#v", ctrcfg_configmap)

hostedClusterName, err := hypershift.GetHostedClusterName()
Expect(err).ToNot(HaveOccurred())
np, err = nodepools.GetByClusterName(context.TODO(), testclient.ControlPlaneClient, hostedClusterName)
Expect(err).ToNot(HaveOccurred())

By("Attaching the Config object to the nodepool")
Expect(nodepools.AttachConfigObject(context.TODO(), testclient.ControlPlaneClient, ctrcfgConfigMap)).To(Succeed())

By("Waiting for the nodepool configuration to start updating")
err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

By("Waiting for the nodepool configuration to be ready")
err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())
} else {
ctrcfg = newContainerRuntimeConfig(ContainerRuntimeConfigName, profile, mcp)

By(fmt.Sprintf("creating ContainerRuntimeConfig %q", ctrcfg.Name))
Expect(testclient.ControlPlaneClient.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg %#v", ctrcfg)

DeferCleanup(func() {
Expect(testclient.Client.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg %#v", ctrcfg)
By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName))
mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus)
By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName))
mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus)
}
DeferCleanup(func() {
Expect(testclient.ControlPlaneClient.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg %#v", ctrcfg)
if hypershift.IsHypershiftCluster() {
By("Deattaching the Config object from the nodepool")
Expect(nodepools.DeattachConfigObject(context.TODO(), testclient.ControlPlaneClient, ctrcfg)).To(Succeed())

By("Waiting for the nodepool configuration to start updating")
err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

By("Waiting for the nodepool configuration to be ready")
err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())
} else {
By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName))
mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus)
By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName))
mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus)
}
})

By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName))
mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus)
By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName))
mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus)
}

for i := 0; i < len(workerRTNodes); i++ {
By("Determing the default container runtime used in the node")
tunedPod, err := tuned.GetPod(context.TODO(), &workerRTNodes[i])
Expect(err).ToNot(HaveOccurred())
expectedRuntime, err = runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.Client, tunedPod)
expectedRuntime, err = runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.DataPlaneClient, tunedPod)
Expect(err).ToNot(HaveOccurred())
testlog.Infof("Container runtime used for the node: %s", expectedRuntime)

Expand All @@ -1210,15 +1250,15 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
testpod.Spec.NodeName = workerRTNodes[i].Name
testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNodes[i].Name}
By(fmt.Sprintf("creating a test pod using high-performance runtime class on node %s", workerRTNodes[i].Name))
Expect(testclient.Client.Create(context.TODO(), testpod)).ToNot(HaveOccurred())
Expect(testclient.DataPlaneClient.Create(context.TODO(), testpod)).ToNot(HaveOccurred())
DeferCleanup(func() {
By(fmt.Sprintf("deleting the test pod from node %s", workerRTNodes[i].Name))
Expect(testclient.Client.Delete(context.TODO(), testpod)).ToNot(HaveOccurred())
Expect(testclient.DataPlaneClient.Delete(context.TODO(), testpod)).ToNot(HaveOccurred())
Expect(pods.WaitForDeletion(context.TODO(), testpod, pods.DefaultDeletionTimeout*time.Second)).ToNot(HaveOccurred())
})
testpod, err = pods.WaitForCondition(context.TODO(), client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
Expect(err).ToNot(HaveOccurred())
runtimeType, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.Client, testpod)
runtimeType, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.DataPlaneClient, testpod)
Expect(err).ToNot(HaveOccurred())
testlog.Infof("Container runtime used for the test pod: %s", runtimeType)
Expect(runtimeType).To(Equal(expectedRuntime))
@@ -1294,7 +1334,7 @@ func getNodeNames(nodes []corev1.Node) []string {
}

func removeLabels(nodeSelector map[string]string, targetNode *corev1.Node) {
ExpectWithOffset(1, testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(targetNode), targetNode)).ToNot(HaveOccurred())
ExpectWithOffset(1, testclient.ControlPlaneClient.Get(context.TODO(), client.ObjectKeyFromObject(targetNode), targetNode)).ToNot(HaveOccurred())
patchNode := false
for l := range nodeSelector {
if _, ok := targetNode.Labels[l]; ok {
@@ -1309,7 +1349,7 @@ func removeLabels(nodeSelector map[string]string, targetNode *corev1.Node) {
}
label, err := json.Marshal(targetNode.Labels)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
ExpectWithOffset(1, testclient.Client.Patch(context.TODO(), targetNode,
ExpectWithOffset(1, testclient.ControlPlaneClient.Patch(context.TODO(), targetNode,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/labels", "value": %s }]`, label)),
@@ -1322,8 +1362,36 @@ func removeLabels(nodeSelector map[string]string, targetNode *corev1.Node) {
mcps.WaitForCondition(testutils.RoleWorker, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
}

func containerRuntimeConfigConfigMap(name string, profile *performancev2.PerformanceProfile, profileMCP *machineconfigv1.MachineConfigPool) (*corev1.ConfigMap, error) {
containerRuntimeConfig := newContainerRuntimeConfig(name, profile, profileMCP)
yamlData, err := yaml.Marshal(containerRuntimeConfig)
if err != nil {
return nil, err
}

configMap := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "clusters",
},
Data: map[string]string{
"config": string(yamlData),
},
}

return configMap, nil
}

func newContainerRuntimeConfig(name string, profile *performancev2.PerformanceProfile, profileMCP *machineconfigv1.MachineConfigPool) *machineconfigv1.ContainerRuntimeConfig {
return &machineconfigv1.ContainerRuntimeConfig{
TypeMeta: metav1.TypeMeta{
Kind: "ContainerRuntimeConfig",
APIVersion: machineconfigv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
48 changes: 48 additions & 0 deletions test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go
@@ -81,6 +81,54 @@ func GetByClusterName(ctx context.Context, c client.Client, hostedClusterName st
return np, nil
}

func AttachConfigObject(ctx context.Context, cli client.Client, object client.Object) error {
np, err := GetNodePool(ctx, cli)
if err != nil {
return err
}

return AttachConfigObjectToNodePool(ctx, cli, object, np)
}

func AttachConfigObjectToNodePool(ctx context.Context, cli client.Client, object client.Object, np *hypershiftv1beta1.NodePool) error {
updatedConfig := []corev1.LocalObjectReference{{Name: object.GetName()}}
for _, cfg := range np.Spec.Config {
if cfg.Name != object.GetName() {
updatedConfig = append(updatedConfig, cfg)
}
}
np.Spec.Config = updatedConfig
return cli.Update(ctx, np)
}

func DeattachConfigObject(ctx context.Context, cli client.Client, object client.Object) error {
np, err := GetNodePool(ctx, cli)
if err != nil {
return err
}

return DeattachConfigObjectFromNodePool(ctx, cli, object, np)
}

func DeattachConfigObjectFromNodePool(ctx context.Context, cli client.Client, object client.Object, np *hypershiftv1beta1.NodePool) error {
for i := range np.Spec.Config {
if np.Spec.Config[i].Name == object.GetName() {
np.Spec.Config = append(np.Spec.Config[:i], np.Spec.Config[i+1:]...)
break
}
}
return cli.Update(ctx, np)
}

// AttachTuningObject attaches a tuning object to the nodepool associated with the hosted cluster.
// The function is idempotent
func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object) error {

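For reference, a minimal usage sketch of the new nodepools helpers. This is a hypothetical standalone snippet, not test code from this commit; it assumes cli is a controller-runtime client for the management cluster, cm is the config ConfigMap created above, and np is the NodePool returned by nodepools.GetByClusterName.

    // Attach the config object and wait for the NodePool to pick it up.
    if err := nodepools.AttachConfigObject(ctx, cli, cm); err != nil {
        return err
    }
    if err := nodepools.WaitForUpdatingConfig(ctx, cli, np.Name, np.Namespace); err != nil {
        return err
    }
    if err := nodepools.WaitForConfigToBeReady(ctx, cli, np.Name, np.Namespace); err != nil {
        return err
    }

    // Roll back by detaching the same object and waiting for the NodePool to settle again.
    if err := nodepools.DeattachConfigObject(ctx, cli, cm); err != nil {
        return err
    }
    if err := nodepools.WaitForUpdatingConfig(ctx, cli, np.Name, np.Namespace); err != nil {
        return err
    }
    if err := nodepools.WaitForConfigToBeReady(ctx, cli, np.Name, np.Namespace); err != nil {
        return err
    }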