From 362196e5b5a2a5d7797248b30c34ff0781bc4887 Mon Sep 17 00:00:00 2001 From: Ronny Baturov Date: Tue, 4 Jun 2024 11:48:07 +0300 Subject: [PATCH] Unify ExecCommand and ExecCommandToString Refactor all calls to use the unified function ExecCommand, and add a util ToString function for the call sites that need the output as a string. Signed-off-by: Ronny Baturov --- .../functests/11_mixedcpus/mixedcpus.go | 11 +-- .../functests/1_performance/cpu_management.go | 54 +++++++++------ .../functests/1_performance/irqbalance.go | 13 +++- .../functests/1_performance/netqueues.go | 6 +- .../functests/1_performance/performance.go | 42 +++++++---- .../functests/1_performance/rt-kernel.go | 3 +- .../2_performance_update/memorymanager.go | 3 +- .../2_performance_update/updating_profile.go | 69 +++++++++++++------ .../7_performance_kubelet_node/cgroups.go | 39 ++++++++--- .../7_performance_kubelet_node/kubelet.go | 6 +- .../workloadhints.go | 15 ++-- .../functests/utils/cgroup/runtime/runtime.go | 4 +- .../functests/utils/nodes/nodes.go | 35 +++++----- .../functests/utils/tuned/tuned.go | 26 ++++--- .../functests/utils/utils.go | 10 ++- 15 files changed, 222 insertions(+), 114 deletions(-) diff --git a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go index 8e4c672307..7d47b4f822 100644 --- a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go +++ b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go @@ -81,7 +81,8 @@ var _ = Describe("Mixedcpus", Ordered, func() { // test arbitrary one should be good enough worker := &workers[0] cmd := isFileExistCmd(kubeletMixedCPUsConfigFile) - found, err := nodes.ExecCommandToString(ctx, cmd, worker) + out, err := nodes.ExecCommand(ctx, worker, cmd) + found := testutils.ToString(out) Expect(err).ToNot(HaveOccurred(), "failed to execute command on node; cmd=%q node=%q", cmd, worker) Expect(found).To(Equal("true"), "file not found; file=%q", kubeletMixedCPUsConfigFile) }) @@ -112,7 +113,8 @@ var _ = Describe("Mixedcpus", Ordered, func() { "-c", fmt.Sprintf("/bin/awk -F '\"' '/shared_cpuset.*/ { print $2 }' %s", runtime.CRIORuntimeConfigFile), } - cpus, err := nodes.ExecCommandToString(ctx, cmd, worker) + out, err := nodes.ExecCommand(ctx, worker, cmd) + cpus := testutils.ToString(out) Expect(err).ToNot(HaveOccurred(), "failed to execute command on node; cmd=%q node=%q", cmd, worker) cpus = strings.Trim(cpus, "\n") crioShared := mustParse(cpus) @@ -268,7 +270,7 @@ var _ = Describe("Mixedcpus", Ordered, func() { cmd := kubeletRestartCmd() // The command would fail since it aborts all the pods during restart - _, _ = nodes.ExecCommandToString(ctx, cmd, node) + _, _ = nodes.ExecCommand(ctx, node, cmd) // check that the node is ready after we restart Kubelet nodes.WaitForReadyOrFail("post restart", node.Name, 20*time.Minute, 3*time.Second) @@ -616,7 +618,8 @@ var _ = Describe("Mixedcpus", Ordered, func() { // getCPUswithLoadBalanceDisabled Return cpus which are not in any scheduling domain func getCPUswithLoadBalanceDisabled(ctx context.Context, targetNode *corev1.Node) ([]string, error) { cmd := []string{"/bin/bash", "-c", "cat /proc/schedstat"} - schedstatData, err := nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err := nodes.ExecCommand(ctx, targetNode, cmd) + schedstatData := testutils.ToString(out) if err != nil { return nil, err } diff --git
a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go index fe8011808d..e90247a539 100644 --- a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go +++ b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go @@ -120,8 +120,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { It("[test_id:37862][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Verify CPU affinity mask, CPU reservation and CPU isolation on worker node", func() { By("checking isolated CPU") cmd := []string{"cat", "/sys/devices/system/cpu/isolated"} - sysIsolatedCpus, err := nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) + sysIsolatedCpus := testutils.ToString(out) if balanceIsolated { Expect(sysIsolatedCpus).To(BeEmpty()) } else { @@ -130,15 +131,17 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("checking reserved CPU in kubelet config file") cmd = []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"} - conf, err := nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err = nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred(), "failed to cat kubelet.conf") + conf := testutils.ToString(out) // kubelet.conf changed formatting, there is a space after colons atm. Let's deal with both cases with a regex Expect(conf).To(MatchRegexp(fmt.Sprintf(`"reservedSystemCPUs": ?"%s"`, reservedCPU))) By("checking CPU affinity mask for kernel scheduler") cmd = []string{"/bin/bash", "-c", "taskset -pc 1"} - sched, err := nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err = nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred(), "failed to execute taskset") + sched := testutils.ToString(out) mask := strings.SplitAfter(sched, " ") maskSet, err := cpuset.Parse(mask[len(mask)-1]) Expect(err).ToNot(HaveOccurred()) @@ -149,8 +152,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { It("[test_id:34358] Verify rcu_nocbs kernel argument on the node", func() { By("checking that cmdline contains rcu_nocbs with right value") cmd := []string{"cat", "/proc/cmdline"} - cmdline, err := nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) + cmdline := testutils.ToString(out) re := regexp.MustCompile(`rcu_nocbs=\S+`) rcuNocbsArgument := re.FindString(cmdline) Expect(rcuNocbsArgument).To(ContainSubstring("rcu_nocbs=")) @@ -159,13 +163,15 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("checking that new rcuo processes are running on non_isolated cpu") cmd = []string{"pgrep", "rcuo"} - rcuoList, err := nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err = nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) + rcuoList := testutils.ToString(out) for _, rcuo := range strings.Split(rcuoList, "\n") { // check cpu affinity mask cmd = []string{"/bin/bash", "-c", fmt.Sprintf("taskset -pc %s", rcuo)} - taskset, err := nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) + taskset := testutils.ToString(out) mask := 
strings.SplitAfter(taskset, " ") maskSet, err := cpuset.Parse(mask[len(mask)-1]) Expect(err).ToNot(HaveOccurred()) @@ -246,11 +252,12 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { "unexpected QoS Class for %s/%s: %s (looking for %s)", updatedPod.Namespace, updatedPod.Name, updatedPod.Status.QOSClass, expectedQos) - output, err := nodes.ExecCommandToString(ctx, - []string{"/bin/bash", "-c", "ps -o psr $(pgrep -n stress) | tail -1"}, + out, err := nodes.ExecCommand(ctx, workerRTNode, + []string{"/bin/bash", "-c", "ps -o psr $(pgrep -n stress) | tail -1"}, ) Expect(err).ToNot(HaveOccurred(), "failed to get cpu of stress process") + output := testutils.ToString(out) cpu, err := strconv.Atoi(strings.Trim(output, " ")) Expect(err).ToNot(HaveOccurred()) @@ -295,7 +302,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { "systemctl restart kubelet", } - _, _ = nodes.ExecCommandToString(ctx, kubeletRestartCmd, workerRTNode) + _, _ = nodes.ExecCommand(ctx, workerRTNode, kubeletRestartCmd) nodes.WaitForReadyOrFail("post kubele restart", workerRTNode.Name, 20*time.Minute, 3*time.Second) // giving kubelet more time to stabilize and initialize itself before testlog.Infof("post restart: entering cooldown time: %v", restartCooldownTime) @@ -370,8 +377,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { // It may takes some time for the system to reschedule active IRQs Eventually(func() bool { getActiveIrq := []string{"/bin/bash", "-c", "for n in $(find /proc/irq/ -name smp_affinity_list); do echo $(cat $n); done"} - activeIrq, err := nodes.ExecCommandToString(context.TODO(), getActiveIrq, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, getActiveIrq) Expect(err).ToNot(HaveOccurred()) + activeIrq := testutils.ToString(out) Expect(activeIrq).ToNot(BeEmpty()) for _, irq := range strings.Split(activeIrq, "\n") { irqAffinity, err := cpuset.Parse(irq) @@ -424,8 +432,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { Eventually(func() string { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find %s -name *%s*", cpusetPath, podUID)} - podCgroup, err = nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) + podCgroup = testutils.ToString(out) return podCgroup }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(), fmt.Sprintf("cannot find cgroup for pod %q", podUID)) @@ -433,8 +442,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { containersCgroups := "" Eventually(func() string { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find %s -name crio-*", podCgroup)} - containersCgroups, err = nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) + containersCgroups = testutils.ToString(out) return containersCgroups }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(), fmt.Sprintf("cannot find containers cgroups from pod cgroup %q", podCgroup)) @@ -454,9 +464,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("Checking what CPU the infra container is using") cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s/cpuset.cpus", dir)} - output, err := 
nodes.ExecCommandToString(context.TODO(), cmd, workerRTNode) + out, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) - + output := testutils.ToString(out) cpus, err := cpuset.Parse(output) Expect(err).ToNot(HaveOccurred()) @@ -710,8 +720,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { Expect(err).ToNot(HaveOccurred(), "Unable to parse pod cpus") kubepodsExclusiveCpus := fmt.Sprintf("%s/kubepods.slice/cpuset.cpus.exclusive", cgroupRoot) cmd := []string{"cat", kubepodsExclusiveCpus} - exclusiveCpus, err := nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err := nodes.ExecCommand(ctx, targetNode, cmd) Expect(err).ToNot(HaveOccurred()) + exclusiveCpus := testutils.ToString(out) exclusiveCpuset, err := cpuset.Parse(exclusiveCpus) Expect(err).ToNot(HaveOccurred(), "unable to parse cpuset.cpus.exclusive") Expect(podCpuset.Equals(exclusiveCpuset)).To(BeTrue()) @@ -752,8 +763,9 @@ func checkForWorkloadPartitioning(ctx context.Context) bool { "-c", "echo CHECK ; /bin/grep -rEo 'activation_annotation.*target\\.workload\\.openshift\\.io/management.*' /etc/crio/crio.conf.d/ || true", } - output, err := nodes.ExecCommandToString(ctx, cmd, workerRTNode) + out, err := nodes.ExecCommand(ctx, workerRTNode, cmd) Expect(err).ToNot(HaveOccurred(), "Unable to check cluster for Workload Partitioning enabled") + output := testutils.ToString(out) re := regexp.MustCompile(`activation_annotation.*target\.workload\.openshift\.io/management.*`) return re.MatchString(fmt.Sprint(output)) } @@ -773,9 +785,9 @@ func checkPodHTSiblings(ctx context.Context, testpod *corev1.Pod) bool { node, err := nodes.GetByName(testpod.Spec.NodeName) Expect(err).ToNot(HaveOccurred(), "failed to get node %q", testpod.Spec.NodeName) Expect(testpod.Spec.NodeName).ToNot(BeEmpty(), "testpod %s/%s still pending - no nodeName set", testpod.Namespace, testpod.Name) - output, err := nodes.ExecCommandToString(ctx, cmd, node) + out, err := nodes.ExecCommand(ctx, node, cmd) Expect(err).ToNot(HaveOccurred(), "Unable to crictl inspect containerID %q", containerID) - + output := testutils.ToString(out) podcpus, err := cpuset.Parse(strings.Trim(output, "\n")) Expect(err).ToNot( HaveOccurred(), "Unable to cpuset.Parse pod allocated cpu set from output %s", output) @@ -797,11 +809,12 @@ func checkPodHTSiblings(ctx context.Context, testpod *corev1.Pod) bool { "-c", fmt.Sprintf("/bin/cat %s | /bin/sort -u", hostHTSiblingPaths.String()), } - output, err = nodes.ExecCommandToString(ctx, cmd, workerRTNode) + out, err = nodes.ExecCommand(ctx, workerRTNode, cmd) Expect(err).ToNot( HaveOccurred(), "Unable to read host thread_siblings_list files", ) + output = testutils.ToString(out) // output is newline seperated. 
Convert to cpulist format by replacing internal "\n" chars with "," hostHTSiblings := strings.ReplaceAll( @@ -978,10 +991,11 @@ func logEventsForPod(testPod *corev1.Pod) { // getCPUswithLoadBalanceDisabled Return cpus which are not in any scheduling domain func getCPUswithLoadBalanceDisabled(ctx context.Context, targetNode *corev1.Node) ([]string, error) { cmd := []string{"/bin/bash", "-c", "cat /proc/schedstat"} - schedstatData, err := nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err := nodes.ExecCommand(ctx, targetNode, cmd) if err != nil { return nil, err } + schedstatData := testutils.ToString(out) info, err := schedstat.ParseData(strings.NewReader(schedstatData)) if err != nil { diff --git a/test/e2e/performanceprofile/functests/1_performance/irqbalance.go b/test/e2e/performanceprofile/functests/1_performance/irqbalance.go index 9636dfa38d..e57275885e 100644 --- a/test/e2e/performanceprofile/functests/1_performance/irqbalance.go +++ b/test/e2e/performanceprofile/functests/1_performance/irqbalance.go @@ -313,8 +313,9 @@ var _ = Describe("[performance] Checking IRQBalance settings", Ordered, func() { origBannedCPUsFile := "/etc/sysconfig/orig_irq_banned_cpus" By(fmt.Sprintf("Checking content of %q on node %q", origBannedCPUsFile, node.Name)) fullPath := filepath.Join("/", "rootfs", origBannedCPUsFile) - out, err := nodes.ExecCommandToString(context.TODO(), []string{"/usr/bin/cat", fullPath}, node) + output, err := nodes.ExecCommand(context.TODO(), node, []string{"/usr/bin/cat", fullPath}) Expect(err).ToNot(HaveOccurred()) + out := testutils.ToString(output) out = strings.TrimSuffix(out, "\r\n") Expect(out).To(Equal("0"), "file %s does not contain the expect output; expected=0 actual=%s", fullPath, out) }) @@ -327,10 +328,11 @@ var _ = Describe("[performance] Checking IRQBalance settings", Ordered, func() { // require close attention. For the time being we reimplement a form of nodes.BannedCPUs which can handle empty ban list. 
func getIrqBalanceBannedCPUs(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, error) { cmd := []string{"cat", "/rootfs/etc/sysconfig/irqbalance"} - conf, err := nodes.ExecCommandToString(ctx, cmd, node) + out, err := nodes.ExecCommand(ctx, node, cmd) if err != nil { return cpuset.New(), err } + conf := testutils.ToString(out) keyValue := findIrqBalanceBannedCPUsVarFromConf(conf) if len(keyValue) == 0 { @@ -361,7 +363,12 @@ func getIrqBalanceBannedCPUs(ctx context.Context, node *corev1.Node) (cpuset.CPU func getIrqDefaultSMPAffinity(ctx context.Context, node *corev1.Node) (string, error) { cmd := []string{"cat", "/rootfs/proc/irq/default_smp_affinity"} - return nodes.ExecCommandToString(ctx, cmd, node) + out, err := nodes.ExecCommand(ctx, node, cmd) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } func findIrqBalanceBannedCPUsVarFromConf(conf string) string { diff --git a/test/e2e/performanceprofile/functests/1_performance/netqueues.go b/test/e2e/performanceprofile/functests/1_performance/netqueues.go index 52f580011f..4eb1c5f5b0 100644 --- a/test/e2e/performanceprofile/functests/1_performance/netqueues.go +++ b/test/e2e/performanceprofile/functests/1_performance/netqueues.go @@ -401,16 +401,18 @@ func getReservedCPUSize(CPU *performancev2.CPU) int { func getVendorID(ctx context.Context, node corev1.Node, device string) string { cmd := []string{"bash", "-c", fmt.Sprintf("cat /sys/class/net/%s/device/vendor", device)} - stdout, err := nodes.ExecCommandToString(ctx, cmd, &node) + out, err := nodes.ExecCommand(ctx, &node, cmd) Expect(err).ToNot(HaveOccurred()) + stdout := testutils.ToString(out) return stdout } func getDeviceID(ctx context.Context, node corev1.Node, device string) string { cmd := []string{"bash", "-c", fmt.Sprintf("cat /sys/class/net/%s/device/device", device)} - stdout, err := nodes.ExecCommandToString(ctx, cmd, &node) + out, err := nodes.ExecCommand(ctx, &node, cmd) Expect(err).ToNot(HaveOccurred()) + stdout := testutils.ToString(out) return stdout } diff --git a/test/e2e/performanceprofile/functests/1_performance/performance.go b/test/e2e/performanceprofile/functests/1_performance/performance.go index 1034a81f55..5c184abe01 100644 --- a/test/e2e/performanceprofile/functests/1_performance/performance.go +++ b/test/e2e/performanceprofile/functests/1_performance/performance.go @@ -224,32 +224,38 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }) It("[test_id:42400][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] stalld daemon is running as sched_fifo", func() { for _, node := range workerRTNodes { - pid, err := nodes.ExecCommandToString(context.TODO(), []string{"pidof", "stalld"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"pidof", "stalld"}) Expect(err).ToNot(HaveOccurred()) + pid := testutils.ToString(out) Expect(pid).ToNot(BeEmpty()) - sched_tasks, err := nodes.ExecCommandToString(context.TODO(), []string{"chrt", "-ap", pid}, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, []string{"chrt", "-ap", pid}) Expect(err).ToNot(HaveOccurred()) + sched_tasks := testutils.ToString(out) Expect(sched_tasks).To(ContainSubstring("scheduling policy: SCHED_FIFO")) Expect(sched_tasks).To(ContainSubstring("scheduling priority: 10")) } }) It("[test_id:42696][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] Stalld runs in higher priority than ksoftirq and rcu{c,b}", func() { for _, node := range workerRTNodes { - stalld_pid, err := 
nodes.ExecCommandToString(context.TODO(), []string{"pidof", "stalld"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"pidof", "stalld"}) Expect(err).ToNot(HaveOccurred()) + stalld_pid := testutils.ToString(out) Expect(stalld_pid).ToNot(BeEmpty()) - sched_tasks, err := nodes.ExecCommandToString(context.TODO(), []string{"chrt", "-ap", stalld_pid}, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, []string{"chrt", "-ap", stalld_pid}) Expect(err).ToNot(HaveOccurred()) + sched_tasks := testutils.ToString(out) re := regexp.MustCompile("scheduling priority: ([0-9]+)") match := re.FindStringSubmatch(sched_tasks) stalld_prio, err := strconv.Atoi(match[1]) Expect(err).ToNot(HaveOccurred()) - ksoftirq_pid, err := nodes.ExecCommandToString(context.TODO(), []string{"pgrep", "-f", "ksoftirqd", "-n"}, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, []string{"pgrep", "-f", "ksoftirqd", "-n"}) Expect(err).ToNot(HaveOccurred()) + ksoftirq_pid := testutils.ToString(out) Expect(ksoftirq_pid).ToNot(BeEmpty()) - sched_tasks, err = nodes.ExecCommandToString(context.TODO(), []string{"chrt", "-ap", ksoftirq_pid}, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, []string{"chrt", "-ap", ksoftirq_pid}) Expect(err).ToNot(HaveOccurred()) + sched_tasks = testutils.ToString(out) match = re.FindStringSubmatch(sched_tasks) ksoftirq_prio, err := strconv.Atoi(match[1]) Expect(err).ToNot(HaveOccurred()) @@ -263,11 +269,13 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { } //rcuc/n : kthreads that are pinned to CPUs & are responsible to execute the callbacks of rcu threads . //rcub/n : are boosting kthreads ,responsible to monitor per-cpu arrays of lists of tasks that were blocked while in an rcu read-side critical sections. - rcu_pid, err := nodes.ExecCommandToString(context.TODO(), []string{"pgrep", "-f", "rcu[c,b]", "-n"}, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, []string{"pgrep", "-f", "rcu[c,b]", "-n"}) Expect(err).ToNot(HaveOccurred()) + rcu_pid := testutils.ToString(out) Expect(rcu_pid).ToNot(BeEmpty()) - sched_tasks, err = nodes.ExecCommandToString(context.TODO(), []string{"chrt", "-ap", rcu_pid}, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, []string{"chrt", "-ap", rcu_pid}) Expect(err).ToNot(HaveOccurred()) + sched_tasks = testutils.ToString(out) match = re.FindStringSubmatch(sched_tasks) rcu_prio, err := strconv.Atoi(match[1]) Expect(err).ToNot(HaveOccurred()) @@ -307,8 +315,9 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { } // Getting the list of processes that are running on the root cgroup, filtering out the kernel threads (are presented in [square brackets]). command := fmt.Sprintf("cat %s | xargs ps -o cmd | grep -v \"\\[\"", rootCgroupPath) - output, err := nodes.ExecCommandToString(context.TODO(), []string{"/bin/bash", "-c", command}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"/bin/bash", "-c", command}) Expect(err).ToNot(HaveOccurred()) + output := testutils.ToString(out) cmds := strings.Split(output, "\n") processesFound = append(processesFound, cmds[1:]...) 
Expect(processesFound).To(BeEmpty(), "The node %s has the following processes on the root cgroup: %v", node.Name, processesFound) @@ -370,8 +379,9 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, vethinterface := range vethInterfaces { devicePath := fmt.Sprintf("%s/%s", "/rootfs/sys/devices/virtual/net", vethinterface) getRPSMaskCmd := []string{"find", devicePath, "-type", "f", "-name", "rps_cpus", "-exec", "cat", "{}", ";"} - devsRPS, err := nodes.ExecCommandToString(context.TODO(), getRPSMaskCmd, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, getRPSMaskCmd) Expect(err).ToNot(HaveOccurred()) + devsRPS := testutils.ToString(out) for _, devRPS := range strings.Split(devsRPS, "\n") { rpsCPUs, err := components.CPUMaskToCPUSet(devRPS) Expect(err).ToNot(HaveOccurred()) @@ -410,8 +420,9 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, node := range workerRTNodes { By("verify the systemd RPS service uses the correct RPS mask") cmd := []string{"sysctl", "-n", "net.core.rps_default_mask"} - rpsMaskContent, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node) + rpsMaskContent := testutils.ToString(out) rpsMaskContent = strings.TrimSuffix(rpsMaskContent, "\n") rpsCPUs, err := components.CPUMaskToCPUSet(rpsMaskContent) Expect(err).ToNot(HaveOccurred(), "failed to parse RPS mask %q", rpsMaskContent) @@ -427,8 +438,9 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { "-printf", "%p ", "-exec", "cat", "{}", ";", } - devsRPSContent, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node.Name) + devsRPSContent := testutils.ToString(out) devsRPSMap := makeDevRPSMap(devsRPSContent) for path, mask := range devsRPSMap { rpsCPUs, err = components.CPUMaskToCPUSet(mask) @@ -446,9 +458,9 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { "-printf", "%p ", "-exec", "cat", "{}", ";", } - devsRPSContent, err = nodes.ExecCommandToString(context.TODO(), cmd, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node.Name) - + devsRPSContent = testutils.ToString(out) devsRPSMap = makeDevRPSMap(devsRPSContent) for path, mask := range devsRPSMap { rpsCPUs, err = components.CPUMaskToCPUSet(mask) @@ -463,7 +475,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, node := range workerRTNodes { // Verify the systemd RPS services were not created cmd := []string{"ls", "/rootfs/etc/systemd/system/update-rps@.service"} - _, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + _, err := nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).To(HaveOccurred()) } } diff --git a/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go b/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go index 2152b77969..768515a086 100644 --- a/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go +++ b/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go @@ -65,8 +65,9 @@ var _ = Describe("[performance]RT Kernel", Ordered, func() { } cmd := []string{"uname", "-a"} - kernel, err := nodes.ExecCommandToString(context.TODO(), cmd, &nonPerformancesWorkers[0]) + out, err := 
nodes.ExecCommand(context.TODO(), &nonPerformancesWorkers[0], cmd) Expect(err).ToNot(HaveOccurred(), "failed to execute uname") + kernel := testutils.ToString(out) Expect(kernel).To(ContainSubstring("Linux"), "Node should have Linux string") err = nodes.HasPreemptRTKernel(context.TODO(), &nonPerformancesWorkers[0]) diff --git a/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go b/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go index 234a3b831e..7d693463e4 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go @@ -828,7 +828,8 @@ func GetMemoryNodes(ctx context.Context, testPod *corev1.Pod, targetNode *corev1 cpusetMemsPath = filepath.Join(fullPath, "cpuset.mems") } cmd = []string{"cat", cpusetMemsPath} - memoryNodes, err = nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err = nodes.ExecCommand(ctx, targetNode, cmd) + memoryNodes = testutils.ToString(out) testlog.Infof("test pod %s with container id %s has Memory nodes %s", testPod.Name, containerID, memoryNodes) return memoryNodes, err } diff --git a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go index 8f47d0bb7c..744c2fe081 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go @@ -48,10 +48,20 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance chkIrqbalance := []string{"cat", "/rootfs/etc/sysconfig/irqbalance"} chkCmdLineFn := func(ctx context.Context, node *corev1.Node) (string, error) { - return nodes.ExecCommandToString(ctx, chkCmdLine, node) + out, err := nodes.ExecCommand(ctx, node, chkCmdLine) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } chkKubeletConfigFn := func(ctx context.Context, node *corev1.Node) (string, error) { - return nodes.ExecCommandToString(ctx, chkKubeletConfig, node) + out, err := nodes.ExecCommand(ctx, node, chkKubeletConfig) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } chkHugepages2MFn := func(ctx context.Context, node *corev1.Node) (string, error) { @@ -166,8 +176,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance for _, node := range workerRTNodes { for i := 0; i < 2; i++ { nodeCmd := []string{"cat", hugepagesPathForNode(i, 2)} - result, err := nodes.ExecCommandToString(context.TODO(), nodeCmd, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, nodeCmd) Expect(err).ToNot(HaveOccurred()) + result := testutils.ToString(out) t, err := strconv.Atoi(result) Expect(err).ToNot(HaveOccurred()) @@ -307,8 +318,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:28612]Verify that Kernel arguments can me updated (added, removed) thru performance profile", func() { for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandToString(context.TODO(), chkCmdLine, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, chkCmdLine) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) + cmdline := testutils.ToString(out) // Verifying that new argument was added Expect(cmdline).To(ContainSubstring("new-argument=test")) @@ -446,8 +458,9 @@ var _ = 
Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:28440]Verifies that nodeSelector can be updated in performance profile", func() { kubeletConfig, err := nodes.GetKubeletConfig(context.TODO(), newCnfNode) Expect(kubeletConfig.TopologyManagerPolicy).ToNot(BeEmpty()) - cmdline, err := nodes.ExecCommandToString(context.TODO(), chkCmdLine, newCnfNode) + out, err := nodes.ExecCommand(context.TODO(), newCnfNode, chkCmdLine) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) + cmdline := testutils.ToString(out) Expect(cmdline).To(ContainSubstring("tuned.non_isolcpus")) }) @@ -467,8 +480,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance err = nodes.HasPreemptRTKernel(context.TODO(), newCnfNode) Expect(err).To(HaveOccurred()) - cmdline, err := nodes.ExecCommandToString(context.TODO(), chkCmdLine, newCnfNode) + out, err := nodes.ExecCommand(context.TODO(), newCnfNode, chkCmdLine) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) + cmdline := testutils.ToString(out) Expect(cmdline).NotTo(ContainSubstring("tuned.non_isolcpus")) kblcfg, err := nodes.GetKubeletConfig(context.TODO(), newCnfNode) @@ -478,8 +492,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance reservedCPU := string(*profile.Spec.CPU.Reserved) cpuMask, err := components.CPUListToHexMask(reservedCPU) Expect(err).ToNot(HaveOccurred(), "failed to list in Hex %s", reservedCPU) - irqBal, err := nodes.ExecCommandToString(context.TODO(), chkIrqbalance, newCnfNode) + out, err = nodes.ExecCommand(context.TODO(), newCnfNode, chkIrqbalance) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkIrqbalance) + irqBal := testutils.ToString(out) Expect(irqBal).NotTo(ContainSubstring(cpuMask)) }) @@ -558,8 +573,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance initialProfile = profile.DeepCopy() for _, node := range workerRTNodes { - onlineCPUCount, err := nodes.ExecCommandToString(context.TODO(), []string{"nproc", "--all"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"nproc", "--all"}) Expect(err).ToNot(HaveOccurred()) + onlineCPUCount := testutils.ToString(out) onlineCPUInt, err := strconv.Atoi(onlineCPUCount) Expect(err).ToNot(HaveOccurred()) @@ -602,8 +618,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandToString(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"cat", "/sys/devices/system/cpu/offline"}) Expect(err).ToNot(HaveOccurred()) + offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlined)) Expect(err).ToNot(HaveOccurred()) @@ -670,8 +687,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() // Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandToString(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"cat", "/sys/devices/system/cpu/offline"}) Expect(err).ToNot(HaveOccurred()) + offlinedOutput := testutils.ToString(out) offlinedCPUSet, err 
:= cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) @@ -734,8 +752,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandToString(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"cat", "/sys/devices/system/cpu/offline"}) Expect(err).ToNot(HaveOccurred()) + offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) @@ -808,8 +827,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandToString(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"cat", "/sys/devices/system/cpu/offline"}) Expect(err).ToNot(HaveOccurred()) + offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) @@ -924,8 +944,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandToString(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"cat", "/sys/devices/system/cpu/offline"}) Expect(err).ToNot(HaveOccurred()) + offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) @@ -960,8 +981,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance findcmd := `find /sys/devices/system/cpu/cpu* -type f -name online -exec cat {} \;` checkCpuStatusCmd := []string{"bash", "-c", findcmd} for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandToString(context.TODO(), checkCpuStatusCmd, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, checkCpuStatusCmd) Expect(err).NotTo(HaveOccurred()) + stdout := testutils.ToString(out) v := strings.Split(stdout, "\n") for _, val := range v { Expect(val).To(Equal("1")) @@ -1006,8 +1028,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance var reserved, isolated []string var onlineCPUInt int for _, node := range workerRTNodes { - onlineCPUCount, err := nodes.ExecCommandToString(context.TODO(), []string{"nproc", "--all"}, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, []string{"nproc", "--all"}) Expect(err).ToNot(HaveOccurred()) + onlineCPUCount := testutils.ToString(out) onlineCPUInt, err = strconv.Atoi(onlineCPUCount) Expect(err).ToNot(HaveOccurred()) if onlineCPUInt <= 8 { @@ -1065,8 +1088,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance // Verify the systemd RPS service uses the correct RPS mask var maskContent string cmd := []string{"sysctl", "-n", 
"net.core.rps_default_mask"} - maskContent, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node) + maskContent = testutils.ToString(out) rpsMaskContent := strings.Trim(maskContent, "\n") rpsCPUs, err := components.CPUMaskToCPUSet(rpsMaskContent) Expect(err).ToNot(HaveOccurred(), "failed to parse RPS mask %q", rpsMaskContent) @@ -1081,8 +1105,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance "-name", "rps_cpus", "-exec", "cat", "{}", ";", } - devsRPS, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + out, err = nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node.Name) + devsRPS := testutils.ToString(out) for _, devRPS := range strings.Split(devsRPS, "\n") { rpsCPUs, err = components.CPUMaskToCPUSet(devRPS) Expect(err).ToNot(HaveOccurred()) @@ -1114,7 +1139,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance for _, node := range workerRTNodes { // Verify the systemd RPS services were not created cmd := []string{"ls", "/rootfs/etc/systemd/system/update-rps@.service"} - _, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + _, err := nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).To(HaveOccurred()) } }) @@ -1141,8 +1166,9 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance } cmd := []string{"cat", "/rootfs/etc/crio/crio.conf.d/99-runtimes.conf"} for i := 0; i < len(workerRTNodes); i++ { - out, err := nodes.ExecCommandToString(context.TODO(), cmd, &workerRTNodes[i]) + output, err := nodes.ExecCommand(context.TODO(), &workerRTNodes[i], cmd) Expect(err).ToNot(HaveOccurred(), "cannot get 99-runtimes.conf from %q", workerRTNodes[i].Name) + out := testutils.ToString(output) By(fmt.Sprintf("checking node: %q", workerRTNodes[i].Name)) Expect(out).To(ContainSubstring("/bin/runc")) Expect(out).To(ContainSubstring("/run/runc")) @@ -1173,7 +1199,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(ctrcfg.Spec.ContainerRuntimeConfig.DefaultRuntime == machineconfigv1.ContainerRuntimeDefaultRuntimeCrun).To(BeTrue()) cmd := []string{"cat", "/rootfs/etc/crio/crio.conf.d/99-runtimes.conf"} for i := 0; i < len(workerRTNodes); i++ { - out, err := nodes.ExecCommandToString(context.TODO(), cmd, &workerRTNodes[i]) + out, err := nodes.ExecCommand(context.TODO(), &workerRTNodes[i], cmd) Expect(err).ToNot(HaveOccurred(), "cannot get 99-runtimes.conf from %q", workerRTNodes[i].Name) By(fmt.Sprintf("checking node: %q", workerRTNodes[i].Name)) Expect(out).To(ContainSubstring("/usr/bin/crun")) @@ -1214,10 +1240,11 @@ func countHugepagesOnNode(ctx context.Context, node *corev1.Node, sizeInMb int) count := 0 for i := 0; i < len(numaInfo); i++ { nodeCmd := []string{"cat", hugepagesPathForNode(i, sizeInMb)} - result, err := nodes.ExecCommandToString(ctx, nodeCmd, node) + out, err := nodes.ExecCommand(ctx, node, nodeCmd) if err != nil { return 0, err } + result := testutils.ToString(out) t, err := strconv.Atoi(result) if err != nil { return 0, err diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go index 939b051a20..279ff2ac77 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go 
+++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go @@ -96,8 +96,9 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { It("[test_id:64097] Activation file is created", func() { cmd := []string{"ls", activation_file} for _, node := range workerRTNodes { - out, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + output, err := nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "file %s doesn't exist ", activation_file) + out := testutils.ToString(output) Expect(out).To(Equal(activation_file)) } }) @@ -121,7 +122,8 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { containerCgroupPath = filepath.Join(controller, cgroupPathOfPid) } cmd = []string{"cat", fmt.Sprintf("%s", filepath.Join(containerCgroupPath, "/cpuset.cpus"))} - cpus, err := nodes.ExecCommandToString(ctx, cmd, workerRTNode) + out, err = nodes.ExecCommand(ctx, workerRTNode, cmd) + cpus := testutils.ToString(out) containerCpuset, err := cpuset.Parse(cpus) Expect(containerCpuset).To(Equal(onlineCPUSet), "Burstable pod containers cpuset.cpus do not match total online cpus") } @@ -158,8 +160,9 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { By("Checking Activation file") cmd := []string{"ls", activation_file} for _, node := range workerRTNodes { - out, err := nodes.ExecCommandToString(context.TODO(), cmd, &node) + output, err := nodes.ExecCommand(context.TODO(), &node, cmd) Expect(err).ToNot(HaveOccurred(), "file %s doesn't exist ", activation_file) + out := testutils.ToString(output) Expect(out).To(Equal(activation_file)) } }) @@ -215,15 +218,30 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { chkOvsCgrpProcs := func(node *corev1.Node) (string, error) { testlog.Info("Verify cgroup.procs is not empty") cmd := []string{"cat", cgroupProcs} - return nodes.ExecCommandToString(context.TODO(), cmd, node) + out, err := nodes.ExecCommand(context.TODO(), node, cmd) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } chkOvsCgrpCpuset := func(node *corev1.Node) (string, error) { cmd := []string{"cat", cgroupCpusetCpus} - return nodes.ExecCommandToString(context.TODO(), cmd, node) + out, err := nodes.ExecCommand(context.TODO(), node, cmd) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } chkOvsCgroupLoadBalance := func(node *corev1.Node) (string, error) { cmd := []string{"cat", cgroupLoadBalance} - return nodes.ExecCommandToString(context.TODO(), cmd, node) + out, err := nodes.ExecCommand(context.TODO(), node, cmd) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } It("[test_id:64098] Verify cgroup layout on worker node", func() { @@ -552,10 +570,11 @@ func cpuSpecToString(cpus *performancev2.CPU) string { // checkCpuCount check if the node has sufficient cpus func checkCpuCount(ctx context.Context, workerNode *corev1.Node) { - onlineCPUCount, err := nodes.ExecCommandToString(ctx, []string{"nproc", "--all"}, workerNode) + out, err := nodes.ExecCommand(ctx, workerNode, []string{"nproc", "--all"}) if err != nil { Fail(fmt.Sprintf("Failed to fetch online CPUs: %v", err)) } + onlineCPUCount := testutils.ToString(out) onlineCPUInt, err := strconv.Atoi(onlineCPUCount) if err != nil { Fail(fmt.Sprintf("failed to convert online CPU count to integer: %v", err)) @@ -621,10 +640,11 @@ func getCPUMaskForPids(ctx context.Context, pidList []string, 
targetNode *corev1 for _, pid := range pidList { cmd := []string{"taskset", "-pc", pid} - cpumask, err := nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err := nodes.ExecCommand(ctx, targetNode, cmd) if err != nil { return nil, fmt.Errorf("failed to fetch cpus of %s: %s", pid, err) } + cpumask := testutils.ToString(out) mask := strings.SplitAfter(cpumask, " ") maskSet, err := cpuset.Parse(mask[len(mask)-1]) @@ -733,8 +753,9 @@ func ovsPids(ctx context.Context, ovsSystemdServices []string, workerRTNode *cor // taskSet returns cpus used by the pid func taskSet(ctx context.Context, pid string, workerRTNode *corev1.Node) cpuset.CPUSet { cmd := []string{"taskset", "-pc", pid} - output, err := nodes.ExecCommandToString(ctx, cmd, workerRTNode) + out, err := nodes.ExecCommand(ctx, workerRTNode, cmd) Expect(err).ToNot(HaveOccurred(), "unable to fetch cpus using taskset") + output := testutils.ToString(out) tasksetOutput := strings.Split(strings.TrimSpace(output), ":") cpus := strings.TrimSpace(tasksetOutput[1]) ctnCpuset, err := cpuset.Parse(cpus) diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go index b99a1891e4..b0f48bafe8 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go +++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go @@ -81,8 +81,9 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord } kubeletArguments := []string{"/bin/bash", "-c", "ps -ef | grep kubelet | grep config"} for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandToString(context.TODO(), kubeletArguments, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, kubeletArguments) Expect(err).ToNot(HaveOccurred()) + stdout := testutils.ToString(out) Expect(strings.Contains(stdout, "300Mi")).To(BeTrue()) } }) @@ -215,8 +216,9 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord Expect(kubeletConfig.ImageMinimumGCAge.Seconds()).ToNot(Equal(180)) } for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandToString(context.TODO(), kubeletArguments, &node) + out, err := nodes.ExecCommand(context.TODO(), &node, kubeletArguments) Expect(err).ToNot(HaveOccurred()) + stdout := testutils.ToString(out) Expect(strings.Contains(stdout, "300Mi")).To(BeTrue()) } diff --git a/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go b/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go index 097157b0dc..8e69427477 100644 --- a/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go +++ b/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go @@ -731,8 +731,9 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific testlog.Infof("test pod %s with container id %s cgroup path %s", testpod.Name, containerID, cpusetCpusPath) By("Verify powersetting of cpus used by the pod") cmd = []string{"cat", cpusetCpusPath} - output, err := nodes.ExecCommandToString(context.TODO(), cmd, &workerRTNodes[0]) + out, err = nodes.ExecCommand(context.TODO(), &workerRTNodes[0], cmd) Expect(err).ToNot(HaveOccurred()) + output := testutils.ToString(out) cpus, err := cpuset.Parse(output) targetCpus := cpus.List() err = checkCpuGovernorsAndResumeLatency(context.TODO(), targetCpus, &workerRTNodes[0], "0", "schedutil") @@ -835,8 +836,9 @@ var _ = 
Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific testlog.Infof("test pod %s with container id %s cgroup path %s", testpod.Name, containerID, cpusetCpusPath) By("Verify powersetting of cpus used by the pod") cmd = []string{"cat", cpusetCpusPath} - output, err := nodes.ExecCommandToString(context.TODO(), cmd, &workerRTNodes[0]) + out, err = nodes.ExecCommand(context.TODO(), &workerRTNodes[0], cmd) Expect(err).ToNot(HaveOccurred()) + output := testutils.ToString(out) cpus, err := cpuset.Parse(output) targetCpus := cpus.List() err = checkCpuGovernorsAndResumeLatency(context.TODO(), targetCpus, &workerRTNodes[0], "n/a", "performance") @@ -939,16 +941,18 @@ func deleteTestPod(ctx context.Context, testpod *corev1.Pod) { func checkCpuGovernorsAndResumeLatency(ctx context.Context, cpus []int, targetNode *corev1.Node, pm_qos string, governor string) error { for _, cpu := range cpus { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/power/pm_qos_resume_latency_us", cpu)} - output, err := nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err := nodes.ExecCommand(ctx, targetNode, cmd) if err != nil { return err } + output := testutils.ToString(out) Expect(output).To(Equal(pm_qos)) cmd = []string{"/bin/bash", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", cpu)} - output, err = nodes.ExecCommandToString(ctx, cmd, targetNode) + out, err = nodes.ExecCommand(ctx, targetNode, cmd) if err != nil { return err } + output = testutils.ToString(out) Expect(output).To(Equal(governor)) } return nil @@ -964,8 +968,9 @@ func checkHardwareCapability(ctx context.Context, workerRTNodes []corev1.Node) { Skip(fmt.Sprintf("This test need 2 NUMA nodes.The number of NUMA nodes on node %s < 2", node.Name)) } // Additional check so that test gets skipped on vm with fake numa - onlineCPUCount, err := nodes.ExecCommandToString(ctx, []string{"nproc", "--all"}, &node) + out, err := nodes.ExecCommand(ctx, &node, []string{"nproc", "--all"}) Expect(err).ToNot(HaveOccurred()) + onlineCPUCount := testutils.ToString(out) onlineCPUInt, err := strconv.Atoi(onlineCPUCount) Expect(err).ToNot(HaveOccurred()) if onlineCPUInt < totalCpus { diff --git a/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go b/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go index da2ab412cf..17f0aa7c26 100644 --- a/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go +++ b/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go @@ -9,6 +9,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" + testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" ) @@ -32,9 +33,10 @@ func GetContainerRuntimeTypeFor(ctx context.Context, c client.Client, pod *corev "-c", fmt.Sprintf("/bin/awk -F '\"' '/runtime_path.*/ { print $2 }' %s", CRIORuntimeConfigFile), } - out, err := nodes.ExecCommandToString(ctx, cmd, node) + output, err := nodes.ExecCommand(ctx, node, cmd) if err != nil { return "", fmt.Errorf("failed to execute command on node; cmd=%q node=%q err=%v", cmd, node.Name, err) } + out := testutils.ToString(output) return filepath.Base(out), nil } diff --git a/test/e2e/performanceprofile/functests/utils/nodes/nodes.go b/test/e2e/performanceprofile/functests/utils/nodes/nodes.go index 18451d728a..2751782c64 100644 --- 
a/test/e2e/performanceprofile/functests/utils/nodes/nodes.go +++ b/test/e2e/performanceprofile/functests/utils/nodes/nodes.go @@ -145,17 +145,6 @@ func ExecCommand(ctx context.Context, node *corev1.Node, command []string) ([]by return nodeInspector.ExecCommand(ctx, node, command) } -// ExecCommandToString returns the output of the command execution in a string format -func ExecCommandToString(ctx context.Context, cmd []string, node *corev1.Node) (string, error) { - out, err := ExecCommand(ctx, node, cmd) - if err != nil { - return "", err - } - - trimmedString := strings.Trim(string(out), "\n") - return strings.ReplaceAll(trimmedString, "\r", ""), nil -} - // GetKubeletConfig returns KubeletConfiguration loaded from the node /etc/kubernetes/kubelet.conf func GetKubeletConfig(ctx context.Context, node *corev1.Node) (*kubeletconfigv1beta1.KubeletConfiguration, error) { command := []string{"cat", path.Join("/rootfs", testutils.FilePathKubeletConfig)} @@ -217,15 +206,16 @@ func HasPreemptRTKernel(ctx context.Context, node *corev1.Node) error { // with rpm-ostree rpm -q is telling you what you're booted into always, // because ostree binds together (kernel, userspace) as a single commit. cmd := []string{"chroot", "/rootfs", "rpm", "-q", "kernel-rt-core"} - if _, err := ExecCommandToString(ctx, cmd, node); err != nil { + if _, err := ExecCommand(ctx, node, cmd); err != nil { return err } cmd = []string{"/bin/bash", "-c", "cat /rootfs/sys/kernel/realtime"} - out, err := ExecCommandToString(ctx, cmd, node) + output, err := ExecCommand(ctx, node, cmd) if err != nil { return err } + out := testutils.ToString(output) if out != "1" { return fmt.Errorf("RT kernel disabled") @@ -236,7 +226,12 @@ func HasPreemptRTKernel(ctx context.Context, node *corev1.Node) error { func GetDefaultSmpAffinityRaw(ctx context.Context, node *corev1.Node) (string, error) { cmd := []string{"cat", "/proc/irq/default_smp_affinity"} - return ExecCommandToString(ctx, cmd, node) + out, err := ExecCommand(ctx, node, cmd) + if err != nil { + return "", err + } + output := testutils.ToString(out) + return output, nil } // GetDefaultSmpAffinitySet returns the default smp affinity mask for the node @@ -256,10 +251,11 @@ func GetDefaultSmpAffinitySet(ctx context.Context, node *corev1.Node) (cpuset.CP // GetOnlineCPUsSet returns the list of online (being scheduled) CPUs on the node func GetOnlineCPUsSet(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, error) { command := []string{"cat", sysDevicesOnlineCPUs} - onlineCPUs, err := ExecCommandToString(ctx, command, node) + out, err := ExecCommand(ctx, node, command) if err != nil { return cpuset.New(), err } + onlineCPUs := testutils.ToString(out) return cpuset.Parse(onlineCPUs) } @@ -267,8 +263,9 @@ func GetOnlineCPUsSet(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, er // Use a random cpuID from the return value of GetOnlineCPUsSet if not sure func GetSMTLevel(ctx context.Context, cpuID int, node *corev1.Node) int { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/topology/thread_siblings_list | tr -d \"\n\r\"", cpuID)} - threadSiblingsList, err := ExecCommandToString(ctx, cmd, node) + out, err := ExecCommand(ctx, node, cmd) ExpectWithOffset(1, err).ToNot(HaveOccurred()) + threadSiblingsList := testutils.ToString(out) // how many thread sibling you have = SMT level // example: 2-way SMT means 2 threads sibling for each thread cpus, err := cpuset.Parse(strings.TrimSpace(string(threadSiblingsList))) @@ -279,11 +276,12 @@ func GetSMTLevel(ctx 
context.Context, cpuID int, node *corev1.Node) int { // GetNumaNodes returns the number of numa nodes and the associated cpus as list on the node func GetNumaNodes(ctx context.Context, node *corev1.Node) (map[int][]int, error) { lscpuCmd := []string{"lscpu", "-e=node,core,cpu", "-J"} - cmdout, err := ExecCommandToString(ctx, lscpuCmd, node) + out, err := ExecCommand(ctx, node, lscpuCmd) var numaNode, cpu int if err != nil { return nil, err } + cmdout := testutils.ToString(out) numaCpus := make(map[int][]int) var result NumaNodes err = json.Unmarshal([]byte(cmdout), &result) @@ -305,7 +303,8 @@ func GetNumaNodes(ctx context.Context, node *corev1.Node) (map[int][]int, error) // GetCoreSiblings returns the siblings of core per numa node func GetCoreSiblings(ctx context.Context, node *corev1.Node) (map[int]map[int][]int, error) { lscpuCmd := []string{"lscpu", "-e=node,core,cpu", "-J"} - out, err := ExecCommandToString(ctx, lscpuCmd, node) + output, err := ExecCommand(ctx, node, lscpuCmd) + out := testutils.ToString(output) var result NumaNodes var numaNode, core, cpu int coreSiblings := make(map[int]map[int][]int) diff --git a/test/e2e/performanceprofile/functests/utils/tuned/tuned.go b/test/e2e/performanceprofile/functests/utils/tuned/tuned.go index 13f80ca5ca..9df7c859fd 100644 --- a/test/e2e/performanceprofile/functests/utils/tuned/tuned.go +++ b/test/e2e/performanceprofile/functests/utils/tuned/tuned.go @@ -48,11 +48,12 @@ func GetPod(ctx context.Context, node *corev1.Node) (*corev1.Pod, error) { func WaitForStalldTo(ctx context.Context, run bool, interval, timeout time.Duration, node *corev1.Node) error { return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) { cmd := []string{"/bin/bash", "-c", "pidof stalld || echo \"stalld not running\""} - stalldPid, err := nodes.ExecCommandToString(ctx, cmd, node) + out, err := nodes.ExecCommand(ctx, node, cmd) if err != nil { klog.Errorf("failed to execute command %q on node: %q; %v", cmd, node.Name, err) return false, err } + stalldPid := testutils.ToString(out) _, err = strconv.Atoi(stalldPid) if !run { // we don't want stalld to run if err == nil { @@ -71,9 +72,9 @@ func WaitForStalldTo(ctx context.Context, run bool, interval, timeout time.Durat func CheckParameters(ctx context.Context, node *corev1.Node, sysctlMap map[string]string, kernelParameters []string, stalld, rtkernel bool) { cmd := []string{"/bin/bash", "-c", "pidof stalld || echo \"stalld not running\""} By(fmt.Sprintf("Executing %q", cmd)) - stalldPid, err := nodes.ExecCommandToString(ctx, cmd, node) + out, err := nodes.ExecCommand(ctx, node, cmd) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q; %v", cmd, node.Name, err) - + stalldPid := testutils.ToString(out) _, err = strconv.Atoi(stalldPid) if stalld { ExpectWithOffset(1, err).ToNot(HaveOccurred(), @@ -103,15 +104,17 @@ func CheckParameters(ctx context.Context, node *corev1.Node, sysctlMap map[strin for param, expected := range sysctlMap { cmd = []string{"sysctl", "-n", param} By(fmt.Sprintf("Executing %q", cmd)) - out, err := nodes.ExecCommandToString(ctx, cmd, node) + output, err := nodes.ExecCommand(ctx, node, cmd) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) + out := testutils.ToString(output) ExpectWithOffset(1, out).Should(Equal(expected), "parameter %s value is not %s", param, expected) } cmd = []string{"cat", "/proc/cmdline"} By(fmt.Sprintf("Executing %q", cmd)) - cmdline, 
err := nodes.ExecCommandToString(ctx, cmd, node) + out, err = nodes.ExecCommand(ctx, node, cmd) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) + cmdline := testutils.ToString(out) for _, param := range kernelParameters { ExpectWithOffset(1, cmdline).To(ContainSubstring(param)) @@ -120,8 +123,9 @@ func CheckParameters(ctx context.Context, node *corev1.Node, sysctlMap map[strin if !rtkernel { cmd = []string{"uname", "-a"} By(fmt.Sprintf("Executing %q", cmd)) - kernel, err := nodes.ExecCommandToString(ctx, cmd, node) + out, err := nodes.ExecCommand(ctx, node, cmd) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) + kernel := testutils.ToString(out) ExpectWithOffset(1, kernel).To(ContainSubstring("Linux"), "kernel should be Linux") err = nodes.HasPreemptRTKernel(ctx, node) @@ -152,8 +156,9 @@ func AddPstateParameter(ctx context.Context, node *corev1.Node) string { totalCpus = 32 ) - onlineCPUCount, err := nodes.ExecCommandToString(ctx, []string{"nproc", "--all"}, node) - ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", onlineCPUCount, node.Name) + out, err := nodes.ExecCommand(ctx, node, []string{"nproc", "--all"}) + ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", out, node.Name) + onlineCPUCount := testutils.ToString(out) onlineCPUInt, err := strconv.Atoi(onlineCPUCount) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to convert oneline cpu from string to integer on node: %q", node.Name) // In this case, it is a vm, so pstate must be disabled @@ -163,12 +168,13 @@ func AddPstateParameter(ctx context.Context, node *corev1.Node) string { cmd := []string{"cat", filePath} By(fmt.Sprintf("Executing %q", cmd)) - pName, err := nodes.ExecCommandToString(ctx, cmd, node) - testlog.Infof("found processor name %s for node %s", pName, node.Name) + out, err = nodes.ExecCommand(ctx, node, cmd) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) if err != nil { return pstateDisabled } + pName := testutils.ToString(out) + testlog.Infof("found processor name %s for node %s", pName, node.Name) for _, element := range processorsName { if element == pName { diff --git a/test/e2e/performanceprofile/functests/utils/utils.go b/test/e2e/performanceprofile/functests/utils/utils.go index 246b57dce7..37f5ea4e35 100644 --- a/test/e2e/performanceprofile/functests/utils/utils.go +++ b/test/e2e/performanceprofile/functests/utils/utils.go @@ -4,13 +4,14 @@ import ( "bytes" "context" "fmt" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/bugzilla" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/jira" "os" "os/exec" "strings" "time" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/bugzilla" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/jira" + testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" . 
"github.com/onsi/ginkgo/v2" @@ -113,3 +114,8 @@ func KnownIssueBugzilla(bugId int) { testlog.Infof(fmt.Sprintf("Test is linked to a closed Bugzilla bug rhbz#%d - %s", bug.Id, bug.Summary)) } } + +func ToString(b []byte) string { + trimmedString := strings.Trim(string(b), "\n") + return strings.ReplaceAll(trimmedString, "\r", "") +}