-
Notifications
You must be signed in to change notification settings - Fork 105
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Signed-off-by: Sargun Narula <[email protected]>
- Loading branch information
1 parent
c786628
commit e9fc334
Showing
1 changed file
with
333 additions
and
0 deletions.
There are no files selected for viewing
333 changes: 333 additions & 0 deletions
333
test/e2e/performanceprofile/functests/2_performance_update/tuned_deferred.go
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,333 @@ | ||
package __performance_update | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
|
||
. "github.com/onsi/ginkgo/v2" | ||
. "github.com/onsi/gomega" | ||
|
||
tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" | ||
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components" | ||
testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" | ||
testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" | ||
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" | ||
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift" | ||
hypershiftutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift" | ||
testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" | ||
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" | ||
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools" | ||
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" | ||
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" | ||
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" | ||
|
||
corev1 "k8s.io/api/core/v1" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
|
||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
"strings" | ||
"time" | ||
) | ||
|
||
// Tier5 e2e suite validating the NTO "deferred" Tuned-update feature
// (the tuned.openshift.io/deferred annotation) on top of a performance
// profile. Ordered: the "performance-patch" Tuned object's state is
// intentionally carried between table entries ("first-time" entries
// delete it, "in-place" entries mutate the existing one).
var _ = Describe("Tuned Deferred tests of performance profile", Ordered, Label(string("Tier5")), func() {
	var (
		workerCNFNodes []corev1.Node             // CNF worker nodes selected by the profile's node selector
		err            error
		poolName       string                    // hypershift only: namespaced NodePool key, for logging
		np             *hypershiftv1beta1.NodePool // hypershift only: NodePool hosting the workers
		mcp            string                    // standalone only: MachineConfigPool matching the profile
	)

	// TestSettings drives one DescribeTable entry.
	type TestSettings struct {
		DeferMode         string // value for the tuned.openshift.io/deferred annotation ("always", "update", "never", or "" for none)
		ProfileChangeType string // "first-time" or "in-place"
		ExpectedBehavior  string // "deferred" or "immediate"
		Kernel_shmmni     string // 4096 or 8192
	}

	BeforeEach(func() {
		if discovery.Enabled() && testutils.ProfileNotFound {
			Skip("Discovery mode enabled, performance profile not found")
		}
		// Resolve the CNF worker nodes the Tuned profile will land on.
		workerCNFNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels)
		Expect(err).ToNot(HaveOccurred())
		workerCNFNodes, err = nodes.MatchingOptionalSelector(workerCNFNodes)
		Expect(err).ToNot(HaveOccurred(), "error looking for the optional selector: %v", err)
		Expect(workerCNFNodes).ToNot(BeEmpty())

		profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
		Expect(err).ToNot(HaveOccurred())

		if hypershift.IsHypershiftCluster() {
			// Hypershift: locate the NodePool for the hosted cluster and wait
			// for its config to settle before mutating Tuned objects.
			hostedClusterName, err := hypershift.GetHostedClusterName()
			Expect(err).ToNot(HaveOccurred())
			np, err = nodepools.GetByClusterName(context.TODO(), testclient.ControlPlaneClient, hostedClusterName)
			Expect(err).ToNot(HaveOccurred())
			poolName = client.ObjectKeyFromObject(np).String()
			testlog.Infof("using NodePool: %q", poolName)
			err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())
		} else {
			// Standalone OCP: resolve the MCP matching the performance profile.
			mcp, err = mcps.GetByProfile(profile)
			Expect(err).ToNot(HaveOccurred())
			testlog.Infof("using performanceMCP: %q", mcp)
		}
		fmt.Println(profile.Name)

	})

	Context("Tuned Deferred status", func() {

		DescribeTable("Validate Tuned DeferMode behavior",
			func(tc TestSettings) {

				if tc.ProfileChangeType == "first-time" {
					tuned := getTunedProfile()

					// If profile is present
					if tuned != nil {

						// Delete the profile so the subsequent create is a
						// genuine "first-time" application, then reboot/roll
						// the nodes to clear any deferred state.
						Expect(testclient.ControlPlaneClient.Delete(context.TODO(), tuned)).To(Succeed())
						if hypershiftutils.IsHypershiftCluster() {
							// This will trigger a reboot
							By("De-attaching the tuning object to the second node pool")
							Expect(nodepools.DeattachTuningObject(context.TODO(), testclient.DataPlaneClient, tuned)).To(Succeed())
							By("Waiting for the nodepool configuration to start updating")
							err := nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
							Expect(err).ToNot(HaveOccurred())
							By("Waiting for the nodepool configuration to be ready")
							err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
							Expect(err).ToNot(HaveOccurred())
						} else {
							// Standalone: reboot each worker directly through
							// the MCD container.
							rebootNodeCommand := []string{
								"chroot",
								"/rootfs",
								"/bin/bash",
								"-c",
								"systemctl reboot",
							}
							_, err = runCommandOnNodeThroughMCD(context.TODO(), workerCNFNodes, rebootNodeCommand)
							Expect(err).ToNot(HaveOccurred())
						}
					}
				}
				tuned, err := createTunedObject(tc.DeferMode, tc.Kernel_shmmni, np)
				Expect(err).NotTo(HaveOccurred())

				// "immediate": the sysctl value is live on the node now.
				// "deferred": the Profile condition reports it is waiting
				// for the next node restart.
				switch tc.ExpectedBehavior {
				case "immediate":
					out, err := verifyImmediateApplication(tuned, workerCNFNodes, tc.Kernel_shmmni)
					Expect(err).To(BeNil())
					Expect(out).To(BeTrue())
				case "deferred":
					out, err := verifyDeferredApplication(tuned, workerCNFNodes)
					Expect(err).To(BeNil())
					Expect(out).To(BeTrue())
				}

			},
			Entry("DeferAlways with first-time profile change", TestSettings{
				DeferMode:         "always",
				ProfileChangeType: "first-time",
				ExpectedBehavior:  "deferred",
				Kernel_shmmni:     "4096",
			}),
			// NOTE(review): "8196" looks like a typo for "8192" (the field
			// comment says 4096 or 8192) — confirm the intended value.
			Entry("DeferAlways with in-place profile update", TestSettings{
				DeferMode:         "always",
				ProfileChangeType: "in-place",
				ExpectedBehavior:  "deferred",
				Kernel_shmmni:     "8196",
			}),
			Entry("DeferUpdate with first-time profile change", TestSettings{
				DeferMode:         "update",
				ProfileChangeType: "first-time",
				ExpectedBehavior:  "immediate",
				Kernel_shmmni:     "4096",
			}),
			Entry("DeferUpdate with in-place profile update", TestSettings{
				DeferMode:         "update",
				ProfileChangeType: "in-place",
				ExpectedBehavior:  "deferred",
				Kernel_shmmni:     "8196",
			}),
			Entry("No annotation (default behavior) with first-time profile change", TestSettings{
				DeferMode:         "", // No defer annotation
				ProfileChangeType: "first-time",
				ExpectedBehavior:  "immediate",
				Kernel_shmmni:     "4096",
			}),
			Entry("Never mode with in-place profile update", TestSettings{
				DeferMode:         "never",
				ProfileChangeType: "in-place",
				ExpectedBehavior:  "immediate",
				Kernel_shmmni:     "8196",
			}),
		)

	})
})
|
||
func getTunedProfile() *tunedv1.Tuned { | ||
tunedList := &tunedv1.TunedList{} | ||
tunedName := "performance-patch" // Replace this with your dynamic name if needed | ||
var matchedTuned *tunedv1.Tuned | ||
|
||
Eventually(func(g Gomega) { | ||
// List all Tuned objects in the namespace | ||
g.Expect(testclient.DataPlaneClient.List(context.TODO(), tunedList, &client.ListOptions{ | ||
Namespace: components.NamespaceNodeTuningOperator, | ||
})).To(Succeed()) | ||
|
||
// Ensure there are Tuned items to process | ||
g.Expect(len(tunedList.Items)).To(BeNumerically(">", 1)) | ||
|
||
// Find the Tuned object with the name "performance-patch" | ||
for _, tuned := range tunedList.Items { | ||
fmt.Println(tuned.Name, tunedName) | ||
if tuned.Name == tunedName { | ||
matchedTuned = &tuned | ||
break | ||
} | ||
} | ||
|
||
}).WithTimeout(time.Minute * 3).WithPolling(time.Second * 10).Should(Succeed()) | ||
|
||
// Check if no matching Tuned profile was found | ||
if matchedTuned == nil { | ||
return nil | ||
} | ||
// Return the matched Tuned object | ||
return matchedTuned | ||
} | ||
|
||
// CreateTuned creates a Tuned profile. | ||
func createTunedObject(deferMode string, kernel_shmmni string, np *hypershiftv1beta1.NodePool) (*tunedv1.Tuned, error) { | ||
tunedName := "performance-patch" | ||
ns := components.NamespaceNodeTuningOperator | ||
priority := uint64(19) | ||
data := fmt.Sprintf(` | ||
[main] | ||
summary=Configuration changes profile inherited from performance created tuned | ||
include=openshift-node-performance-performance | ||
[sysctl] | ||
kernel.shmmni=%s | ||
`, kernel_shmmni) | ||
|
||
// Create a Tuned object | ||
tuned := &tunedv1.Tuned{ | ||
TypeMeta: metav1.TypeMeta{ | ||
APIVersion: tunedv1.SchemeGroupVersion.String(), | ||
Kind: "Tuned", | ||
}, | ||
ObjectMeta: metav1.ObjectMeta{ | ||
Name: tunedName, | ||
Namespace: ns, | ||
Annotations: map[string]string{ | ||
"tuned.openshift.io/deferred": deferMode, | ||
}, | ||
}, | ||
Spec: tunedv1.TunedSpec{ | ||
Profile: []tunedv1.TunedProfile{ | ||
{ | ||
Name: &tunedName, | ||
Data: &data, | ||
}, | ||
}, | ||
Recommend: []tunedv1.TunedRecommend{ | ||
{ | ||
MachineConfigLabels: map[string]string{"machineconfiguration.openshift.io/role": testutils.RoleWorkerCNF}, | ||
Priority: &priority, | ||
Profile: &tunedName, | ||
}, | ||
}, | ||
}, | ||
} | ||
|
||
// Create the Tuned object in the cluster | ||
fmt.Println("Creating Tuned Profle") | ||
Expect(testclient.ControlPlaneClient.Create(context.TODO(), tuned)).To(Succeed()) | ||
fmt.Println("Tuned Profle created") | ||
fmt.Println("Verifying") | ||
tuned = getTunedProfile() | ||
fmt.Println("\n\n\n", tuned.Name, "\n\n\n", tuned.Spec) | ||
if hypershiftutils.IsHypershiftCluster() { | ||
By("Attaching the tuning object to the second node pool") | ||
Expect(nodepools.AttachTuningObject(context.TODO(), testclient.ControlPlaneClient, tuned)).To(Succeed()) | ||
|
||
By("Waiting for the nodepool configuration to start updating") | ||
err := nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) | ||
Expect(err).ToNot(HaveOccurred()) | ||
|
||
By("Waiting for the nodepool configuration to be ready") | ||
err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) | ||
Expect(err).ToNot(HaveOccurred()) | ||
} | ||
return tuned, nil | ||
} | ||
|
||
func verifyImmediateApplication(tuned *tunedv1.Tuned, workerCNFNodes []corev1.Node, kernel_shmmni string) (bool, error) { | ||
checkKernelShmmni := []string{ | ||
"chroot", | ||
"/rootfs", | ||
"/bin/bash", | ||
"-c", | ||
"sysctl -a | grep shmmni", | ||
} | ||
out, err := runCommandOnNodeThroughMCD(context.TODO(), workerCNFNodes, checkKernelShmmni) | ||
if err != nil { | ||
expectedSubstring := fmt.Sprintf("kernel.shmmni = %s", kernel_shmmni) | ||
if strings.Contains(out, expectedSubstring) { | ||
return true, nil | ||
} | ||
// If the expected substring is not found | ||
return false, nil | ||
} | ||
return false, err | ||
} | ||
|
||
func verifyDeferredApplication(tuned *tunedv1.Tuned, nodes []corev1.Node) (bool, error) { | ||
expectedMessage := fmt.Sprintf("The TuneD daemon profile is waiting for the next node restart: %s", tuned.Name) | ||
profile := &tunedv1.Profile{} | ||
profiles := &tunedv1.ProfileList{} | ||
err := testclient.DataPlaneClient.List(context.TODO(), profiles, &client.ListOptions{ | ||
Namespace: components.NamespaceNodeTuningOperator, | ||
}) | ||
if err != nil { | ||
return false, err | ||
} | ||
fmt.Println("\n\n\n", profiles) | ||
|
||
node := nodes[0] | ||
err = testclient.DataPlaneClient.Get(context.TODO(), client.ObjectKey{Name: node.Name, Namespace: tuned.Namespace}, profile) | ||
if err != nil { | ||
return false, fmt.Errorf("failed to get profile: %w", err) | ||
} | ||
|
||
// Iterate over the conditions to find the matching message | ||
for _, condition := range profile.Status.Conditions { | ||
if condition.Message == expectedMessage { | ||
return true, nil | ||
} | ||
} | ||
|
||
// Return false if the condition message is not found | ||
return false, nil | ||
} | ||
|
||
func runCommandOnNodeThroughMCD(ctx context.Context, CNFnodes []corev1.Node, command []string) (string, error) { | ||
var lastErr error // Track the last error, if any | ||
var out string | ||
|
||
for _, node := range CNFnodes { | ||
// Execute the command on the node | ||
output, err := nodes.ExecCommand(ctx, &node, command) | ||
if err != nil { | ||
testlog.Errorf("node %q: error while executing command: %v", err) | ||
lastErr = err | ||
} | ||
out = string(output) | ||
} | ||
|
||
return string(out), lastErr | ||
} |