From 12478875a21b5b0e4f8b88c30289c6527f94919a Mon Sep 17 00:00:00 2001 From: Ronny Baturov Date: Mon, 15 Apr 2024 12:03:03 +0300 Subject: [PATCH] Moved functions for executing commands on nodes to node inspector. The node inspector should be used to run commands on nodes. Therefore, I have moved this logic there. The reason for this is that it is clear that we are using the node inspector to execute commands, and it should be running. Signed-off-by: Ronny Baturov --- .../functests/11_mixedcpus/mixedcpus.go | 7 +- .../functests/1_performance/cpu_management.go | 31 ++++----- .../functests/1_performance/hugepages.go | 3 +- .../functests/1_performance/irqbalance.go | 7 +- .../functests/1_performance/netqueues.go | 5 +- .../functests/1_performance/performance.go | 53 ++++++++------- .../functests/1_performance/rt-kernel.go | 3 +- .../2_performance_update/memorymanager.go | 5 +- .../2_performance_update/updating_profile.go | 43 ++++++------ .../7_performance_kubelet_node/cgroups.go | 23 ++++--- .../7_performance_kubelet_node/kubelet.go | 5 +- .../workloadhints.go | 17 ++--- .../functests/9_reboot/devices.go | 3 +- .../functests/Z_deconfig/deconfig.go | 2 +- .../functests/utils/cgroup/runtime/runtime.go | 5 +- .../functests/utils/infrastructure/vm.go | 4 +- .../utils/node_inspector/inspector.go | 55 ++++++++++++++- .../functests/utils/nodes/nodes.go | 67 ++++--------------- .../functests/utils/systemd/systemd.go | 6 +- .../functests/utils/tuned/tuned.go | 11 +-- 20 files changed, 190 insertions(+), 165 deletions(-) diff --git a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go index b37f6f26ba..0fe62175a5 100644 --- a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go +++ b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go @@ -35,6 +35,7 @@ import ( testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" 
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/namespaces" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -80,7 +81,7 @@ var _ = Describe("Mixedcpus", Ordered, func() { // test arbitrary one should be good enough worker := &workers[0] cmd := isFileExistCmd(kubeletMixedCPUsConfigFile) - found, err := nodes.ExecCommandOnNode(ctx, cmd, worker) + found, err := nodeInspector.ExecCommandOnNode(ctx, cmd, worker) Expect(err).ToNot(HaveOccurred(), "failed to execute command on node; cmd=%q node=%q", cmd, worker) Expect(found).To(Equal("true"), "file not found; file=%q", kubeletMixedCPUsConfigFile) }) @@ -111,7 +112,7 @@ var _ = Describe("Mixedcpus", Ordered, func() { "-c", fmt.Sprintf("/bin/awk -F '\"' '/shared_cpuset.*/ { print $2 }' %s", runtime.CRIORuntimeConfigFile), } - cpus, err := nodes.ExecCommandOnNode(ctx, cmd, worker) + cpus, err := nodeInspector.ExecCommandOnNode(ctx, cmd, worker) Expect(err).ToNot(HaveOccurred(), "failed to execute command on node; cmd=%q node=%q", cmd, worker) cpus = strings.Trim(cpus, "\n") crioShared := mustParse(cpus) @@ -235,7 +236,7 @@ var _ = Describe("Mixedcpus", Ordered, func() { cmd := kubeletRestartCmd() // The command would fail since it aborts all the pods during restart - _, _ = nodes.ExecCommandOnNode(ctx, cmd, node) + _, _ = nodeInspector.ExecCommandOnNode(ctx, cmd, node) // check that the node is ready after we restart Kubelet 
nodes.WaitForReadyOrFail("post restart", node.Name, 20*time.Minute, 3*time.Second) diff --git a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go index fe4536da93..028b5ef51b 100644 --- a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go +++ b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go @@ -33,6 +33,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/events" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/images" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -117,7 +118,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { It("[test_id:37862][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Verify CPU affinity mask, CPU reservation and CPU isolation on worker node", func() { By("checking isolated CPU") cmd := []string{"cat", "/sys/devices/system/cpu/isolated"} - sysIsolatedCpus, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + sysIsolatedCpus, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) if balanceIsolated { Expect(sysIsolatedCpus).To(BeEmpty()) @@ -127,14 +128,14 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("checking 
reserved CPU in kubelet config file") cmd = []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"} - conf, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + conf, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred(), "failed to cat kubelet.conf") // kubelet.conf changed formatting, there is a space after colons atm. Let's deal with both cases with a regex Expect(conf).To(MatchRegexp(fmt.Sprintf(`"reservedSystemCPUs": ?"%s"`, reservedCPU))) By("checking CPU affinity mask for kernel scheduler") cmd = []string{"/bin/bash", "-c", "taskset -pc 1"} - sched, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + sched, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred(), "failed to execute taskset") mask := strings.SplitAfter(sched, " ") maskSet, err := cpuset.Parse(mask[len(mask)-1]) @@ -146,7 +147,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { It("[test_id:34358] Verify rcu_nocbs kernel argument on the node", func() { By("checking that cmdline contains rcu_nocbs with right value") cmd := []string{"cat", "/proc/cmdline"} - cmdline, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + cmdline, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) re := regexp.MustCompile(`rcu_nocbs=\S+`) rcuNocbsArgument := re.FindString(cmdline) @@ -156,12 +157,12 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("checking that new rcuo processes are running on non_isolated cpu") cmd = []string{"pgrep", "rcuo"} - rcuoList, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + rcuoList, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) for _, rcuo := range strings.Split(rcuoList, "\n") { // check cpu affinity mask cmd = 
[]string{"/bin/bash", "-c", fmt.Sprintf("taskset -pc %s", rcuo)} - taskset, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + taskset, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) mask := strings.SplitAfter(taskset, " ") maskSet, err := cpuset.Parse(mask[len(mask)-1]) @@ -239,7 +240,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { "unexpected QoS Class for %s/%s: %s (looking for %s)", updatedPod.Namespace, updatedPod.Name, updatedPod.Status.QOSClass, expectedQos) - output, err := nodes.ExecCommandOnNode(ctx, + output, err := nodeInspector.ExecCommandOnNode(ctx, []string{"/bin/bash", "-c", "ps -o psr $(pgrep -n stress) | tail -1"}, workerRTNode, ) @@ -314,7 +315,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { // It may takes some time for the system to reschedule active IRQs Eventually(func() bool { getActiveIrq := []string{"/bin/bash", "-c", "for n in $(find /proc/irq/ -name smp_affinity_list); do echo $(cat $n); done"} - activeIrq, err := nodes.ExecCommandOnNode(context.TODO(), getActiveIrq, workerRTNode) + activeIrq, err := nodeInspector.ExecCommandOnNode(context.TODO(), getActiveIrq, workerRTNode) Expect(err).ToNot(HaveOccurred()) Expect(activeIrq).ToNot(BeEmpty()) for _, irq := range strings.Split(activeIrq, "\n") { @@ -368,7 +369,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { Eventually(func() string { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find %s -name *%s*", cpusetPath, podUID)} - podCgroup, err = nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + podCgroup, err = nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) return podCgroup }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(), @@ -377,7 +378,7 @@ var _ = Describe("[rfe_id:27363][performance] 
CPU Management", Ordered, func() { containersCgroups := "" Eventually(func() string { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find %s -name crio-*", podCgroup)} - containersCgroups, err = nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + containersCgroups, err = nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) return containersCgroups }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(), @@ -398,7 +399,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("Checking what CPU the infra container is using") cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s/cpuset.cpus", dir)} - output, err := nodes.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) + output, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, workerRTNode) Expect(err).ToNot(HaveOccurred()) cpus, err := cpuset.Parse(output) @@ -673,7 +674,7 @@ func checkForWorkloadPartitioning(ctx context.Context) bool { "-c", "echo CHECK ; /bin/grep -rEo 'activation_annotation.*target\\.workload\\.openshift\\.io/management.*' /etc/crio/crio.conf.d/ || true", } - output, err := nodes.ExecCommandOnNode(ctx, cmd, workerRTNode) + output, err := nodeInspector.ExecCommandOnNode(ctx, cmd, workerRTNode) Expect(err).ToNot(HaveOccurred(), "Unable to check cluster for Workload Partitioning enabled") re := regexp.MustCompile(`activation_annotation.*target\.workload\.openshift\.io/management.*`) return re.MatchString(fmt.Sprint(output)) @@ -694,7 +695,7 @@ func checkPodHTSiblings(ctx context.Context, testpod *corev1.Pod) bool { node, err := nodes.GetByName(testpod.Spec.NodeName) Expect(err).ToNot(HaveOccurred(), "failed to get node %q", testpod.Spec.NodeName) Expect(testpod.Spec.NodeName).ToNot(BeEmpty(), "testpod %s/%s still pending - no nodeName set", testpod.Namespace, testpod.Name) - output, err := nodes.ExecCommandOnNode(ctx, cmd, node) + output, err := 
nodeInspector.ExecCommandOnNode(ctx, cmd, node) Expect(err).ToNot(HaveOccurred(), "Unable to crictl inspect containerID %q", containerID) podcpus, err := cpuset.Parse(strings.Trim(output, "\n")) @@ -718,7 +719,7 @@ func checkPodHTSiblings(ctx context.Context, testpod *corev1.Pod) bool { "-c", fmt.Sprintf("/bin/cat %s | /bin/sort -u", hostHTSiblingPaths.String()), } - output, err = nodes.ExecCommandOnNode(ctx, cmd, workerRTNode) + output, err = nodeInspector.ExecCommandOnNode(ctx, cmd, workerRTNode) Expect(err).ToNot( HaveOccurred(), "Unable to read host thread_siblings_list files", @@ -899,7 +900,7 @@ func logEventsForPod(testPod *corev1.Pod) { // getCPUswithLoadBalanceDisabled Return cpus which are not in any scheduling domain func getCPUswithLoadBalanceDisabled(ctx context.Context, targetNode *corev1.Node) ([]string, error) { cmd := []string{"/bin/bash", "-c", "cat /proc/schedstat"} - schedstatData, err := nodes.ExecCommandOnNode(ctx, cmd, targetNode) + schedstatData, err := nodeInspector.ExecCommandOnNode(ctx, cmd, targetNode) if err != nil { return nil, err } diff --git a/test/e2e/performanceprofile/functests/1_performance/hugepages.go b/test/e2e/performanceprofile/functests/1_performance/hugepages.go index 179910984c..44168d8c0e 100644 --- a/test/e2e/performanceprofile/functests/1_performance/hugepages.go +++ b/test/e2e/performanceprofile/functests/1_performance/hugepages.go @@ -25,6 +25,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/images" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" 
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -189,7 +190,7 @@ var _ = Describe("[performance]Hugepages", Ordered, func() { func checkHugepagesStatus(ctx context.Context, path string, workerRTNode *corev1.Node) int { command := []string{"cat", path} - out, err := nodes.ExecCommandOnMachineConfigDaemon(ctx, workerRTNode, command) + out, err := nodeInspector.ExecCommandOnDaemon(ctx, workerRTNode, command) Expect(err).ToNot(HaveOccurred()) n, err := strconv.Atoi(strings.Trim(string(out), "\n\r")) Expect(err).ToNot(HaveOccurred()) diff --git a/test/e2e/performanceprofile/functests/1_performance/irqbalance.go b/test/e2e/performanceprofile/functests/1_performance/irqbalance.go index dcc0bd3647..bebc99caf6 100644 --- a/test/e2e/performanceprofile/functests/1_performance/irqbalance.go +++ b/test/e2e/performanceprofile/functests/1_performance/irqbalance.go @@ -29,6 +29,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -313,7 +314,7 
@@ var _ = Describe("[performance] Checking IRQBalance settings", Ordered, func() { origBannedCPUsFile := "/etc/sysconfig/orig_irq_banned_cpus" By(fmt.Sprintf("Checking content of %q on node %q", origBannedCPUsFile, node.Name)) fullPath := filepath.Join("/", "rootfs", origBannedCPUsFile) - out, err := nodes.ExecCommandOnNode(context.TODO(), []string{"/usr/bin/cat", fullPath}, node) + out, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"/usr/bin/cat", fullPath}, node) Expect(err).ToNot(HaveOccurred()) out = strings.TrimSuffix(out, "\r\n") Expect(out).To(Equal("0"), "file %s does not contain the expect output; expected=0 actual=%s", fullPath, out) @@ -327,7 +328,7 @@ var _ = Describe("[performance] Checking IRQBalance settings", Ordered, func() { // require close attention. For the time being we reimplement a form of nodes.BannedCPUs which can handle empty ban list. func getIrqBalanceBannedCPUs(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, error) { cmd := []string{"cat", "/rootfs/etc/sysconfig/irqbalance"} - conf, err := nodes.ExecCommandOnNode(ctx, cmd, node) + conf, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) if err != nil { return cpuset.New(), err } @@ -361,7 +362,7 @@ func getIrqBalanceBannedCPUs(ctx context.Context, node *corev1.Node) (cpuset.CPU func getIrqDefaultSMPAffinity(ctx context.Context, node *corev1.Node) (string, error) { cmd := []string{"cat", "/rootfs/proc/irq/default_smp_affinity"} - return nodes.ExecCommandOnNode(ctx, cmd, node) + return nodeInspector.ExecCommandOnNode(ctx, cmd, node) } func findIrqBalanceBannedCPUsVarFromConf(conf string) string { diff --git a/test/e2e/performanceprofile/functests/1_performance/netqueues.go b/test/e2e/performanceprofile/functests/1_performance/netqueues.go index a20f842f95..a54f820107 100644 --- a/test/e2e/performanceprofile/functests/1_performance/netqueues.go +++ b/test/e2e/performanceprofile/functests/1_performance/netqueues.go @@ -24,6 +24,7 @@ import ( 
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -334,7 +335,7 @@ func getReservedCPUSize(CPU *performancev2.CPU) int { func getVendorID(ctx context.Context, node corev1.Node, device string) string { cmd := []string{"bash", "-c", fmt.Sprintf("cat /sys/class/net/%s/device/vendor", device)} - stdout, err := nodes.ExecCommandOnNode(ctx, cmd, &node) + stdout, err := nodeInspector.ExecCommandOnNode(ctx, cmd, &node) Expect(err).ToNot(HaveOccurred()) return stdout } @@ -342,7 +343,7 @@ func getVendorID(ctx context.Context, node corev1.Node, device string) string { func getDeviceID(ctx context.Context, node corev1.Node, device string) string { cmd := []string{"bash", "-c", fmt.Sprintf("cat /sys/class/net/%s/device/device", device)} - stdout, err := nodes.ExecCommandOnNode(ctx, cmd, &node) + stdout, err := nodeInspector.ExecCommandOnNode(ctx, cmd, &node) Expect(err).ToNot(HaveOccurred()) return stdout } diff --git a/test/e2e/performanceprofile/functests/1_performance/performance.go b/test/e2e/performanceprofile/functests/1_performance/performance.go index 72fb38ed20..41a157acb5 100644 --- a/test/e2e/performanceprofile/functests/1_performance/performance.go +++ 
b/test/e2e/performanceprofile/functests/1_performance/performance.go @@ -41,6 +41,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/infrastructure" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -143,7 +144,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { It("[test_id:31198] Should set CPU affinity kernel argument", func() { for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) + cmdline, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) // since systemd.cpu_affinity is calculated on node level using tuned we can check only the key in this context. 
Expect(string(cmdline)).To(ContainSubstring("systemd.cpu_affinity=")) @@ -152,7 +153,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { It("[test_id:32702] Should set CPU isolcpu's kernel argument managed_irq flag", func() { for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) + cmdline, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) if profile.Spec.CPU.BalanceIsolated != nil && *profile.Spec.CPU.BalanceIsolated == false { Expect(string(cmdline)).To(ContainSubstring("isolcpus=domain,managed_irq,")) @@ -165,7 +166,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { It("[test_id:27081][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set workqueue CPU mask", func() { for _, node := range workerRTNodes { By(fmt.Sprintf("Getting tuned.non_isolcpus kernel argument on %q", node.Name)) - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) + cmdline, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) re := regexp.MustCompile(`tuned.non_isolcpus=\S+`) nonIsolcpusFullArgument := re.FindString(string(cmdline)) @@ -186,13 +187,13 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { } By(fmt.Sprintf("Getting the virtual workqueue mask (/sys/devices/virtual/workqueue/cpumask) on %q", node.Name)) - workqueueMaskData, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/sys/devices/virtual/workqueue/cpumask"}) + workqueueMaskData, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/sys/devices/virtual/workqueue/cpumask"}) Expect(err).ToNot(HaveOccurred()) workqueueMask := getTrimmedMaskFromData("virtual", workqueueMaskData) 
expectMasksEqual(nonIsolcpusMaskNoDelimiters, workqueueMask) By(fmt.Sprintf("Getting the writeback workqueue mask (/sys/bus/workqueue/devices/writeback/cpumask) on %q", node.Name)) - workqueueWritebackMaskData, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/sys/bus/workqueue/devices/writeback/cpumask"}) + workqueueWritebackMaskData, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/sys/bus/workqueue/devices/writeback/cpumask"}) Expect(err).ToNot(HaveOccurred()) workqueueWritebackMask := getTrimmedMaskFromData("workqueue", workqueueWritebackMaskData) expectMasksEqual(nonIsolcpusMaskNoDelimiters, workqueueWritebackMask) @@ -204,12 +205,12 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { // updating the field to 4 as the latest proc/cmdline has been updated to // BOOT_IMAGE=(hd0,gpt3)/boot/ostree/rhcos- instead of BOOT_IMAGE=(hd1,gpt3)/ostree/rhcos- // TODO: Modify the awk script to be resilent to these changes or check if we can remove it completely - rhcosId, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"awk", "-F", "/", "{printf $4}", "/rootfs/proc/cmdline"}) + rhcosId, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"awk", "-F", "/", "{printf $4}", "/rootfs/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) - initramfsImagesPath, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"find", filepath.Join("/rootfs/boot/ostree", string(rhcosId)), "-name", "*.img"}) + initramfsImagesPath, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"find", filepath.Join("/rootfs/boot/ostree", string(rhcosId)), "-name", "*.img"}) Expect(err).ToNot(HaveOccurred()) modifiedImagePath := strings.TrimPrefix(strings.TrimSpace(string(initramfsImagesPath)), "/rootfs") - initrd, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"chroot", "/rootfs", "lsinitrd", 
modifiedImagePath}) + initrd, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"chroot", "/rootfs", "lsinitrd", modifiedImagePath}) Expect(err).ToNot(HaveOccurred()) Expect(string(initrd)).ShouldNot(ContainSubstring("'/etc/systemd/system.conf /etc/systemd/system.conf.d/setAffinity.conf'")) } @@ -224,10 +225,10 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }) It("[test_id:42400][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] stalld daemon is running as sched_fifo", func() { for _, node := range workerRTNodes { - pid, err := nodes.ExecCommandOnNode(context.TODO(), []string{"pidof", "stalld"}, &node) + pid, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"pidof", "stalld"}, &node) Expect(err).ToNot(HaveOccurred()) Expect(pid).ToNot(BeEmpty()) - sched_tasks, err := nodes.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", pid}, &node) + sched_tasks, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", pid}, &node) Expect(err).ToNot(HaveOccurred()) Expect(sched_tasks).To(ContainSubstring("scheduling policy: SCHED_FIFO")) Expect(sched_tasks).To(ContainSubstring("scheduling priority: 10")) @@ -235,20 +236,20 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }) It("[test_id:42696][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] Stalld runs in higher priority than ksoftirq and rcu{c,b}", func() { for _, node := range workerRTNodes { - stalld_pid, err := nodes.ExecCommandOnNode(context.TODO(), []string{"pidof", "stalld"}, &node) + stalld_pid, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"pidof", "stalld"}, &node) Expect(err).ToNot(HaveOccurred()) Expect(stalld_pid).ToNot(BeEmpty()) - sched_tasks, err := nodes.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", stalld_pid}, &node) + sched_tasks, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", stalld_pid}, &node) 
Expect(err).ToNot(HaveOccurred()) re := regexp.MustCompile("scheduling priority: ([0-9]+)") match := re.FindStringSubmatch(sched_tasks) stalld_prio, err := strconv.Atoi(match[1]) Expect(err).ToNot(HaveOccurred()) - ksoftirq_pid, err := nodes.ExecCommandOnNode(context.TODO(), []string{"pgrep", "-f", "ksoftirqd", "-n"}, &node) + ksoftirq_pid, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"pgrep", "-f", "ksoftirqd", "-n"}, &node) Expect(err).ToNot(HaveOccurred()) Expect(ksoftirq_pid).ToNot(BeEmpty()) - sched_tasks, err = nodes.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", ksoftirq_pid}, &node) + sched_tasks, err = nodeInspector.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", ksoftirq_pid}, &node) Expect(err).ToNot(HaveOccurred()) match = re.FindStringSubmatch(sched_tasks) ksoftirq_prio, err := strconv.Atoi(match[1]) @@ -263,10 +264,10 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { } //rcuc/n : kthreads that are pinned to CPUs & are responsible to execute the callbacks of rcu threads . //rcub/n : are boosting kthreads ,responsible to monitor per-cpu arrays of lists of tasks that were blocked while in an rcu read-side critical sections. 
- rcu_pid, err := nodes.ExecCommandOnNode(context.TODO(), []string{"pgrep", "-f", "rcu[c,b]", "-n"}, &node) + rcu_pid, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"pgrep", "-f", "rcu[c,b]", "-n"}, &node) Expect(err).ToNot(HaveOccurred()) Expect(rcu_pid).ToNot(BeEmpty()) - sched_tasks, err = nodes.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", rcu_pid}, &node) + sched_tasks, err = nodeInspector.ExecCommandOnNode(context.TODO(), []string{"chrt", "-ap", rcu_pid}, &node) Expect(err).ToNot(HaveOccurred()) match = re.FindStringSubmatch(sched_tasks) rcu_prio, err := strconv.Atoi(match[1]) @@ -283,7 +284,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { It("[test_id:28611][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set additional kernel arguments on the machine", func() { if profile.Spec.AdditionalKernelArgs != nil { for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) + cmdline, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) for _, arg := range profile.Spec.AdditionalKernelArgs { Expect(string(cmdline)).To(ContainSubstring(arg)) @@ -307,7 +308,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { } // Getting the list of processes that are running on the root cgroup, filtering out the kernel threads (are presented in [square brackets]). command := fmt.Sprintf("cat %s | xargs ps -o cmd | grep -v \"\\[\"", rootCgroupPath) - output, err := nodes.ExecCommandOnNode(context.TODO(), []string{"/bin/bash", "-c", command}, &node) + output, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"/bin/bash", "-c", command}, &node) Expect(err).ToNot(HaveOccurred()) cmds := strings.Split(output, "\n") processesFound = append(processesFound, cmds[1:]...) 
@@ -370,7 +371,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, vethinterface := range vethInterfaces { devicePath := fmt.Sprintf("%s/%s", "/rootfs/sys/devices/virtual/net", vethinterface) getRPSMaskCmd := []string{"find", devicePath, "-type", "f", "-name", "rps_cpus", "-exec", "cat", "{}", ";"} - devsRPS, err := nodes.ExecCommandOnNode(context.TODO(), getRPSMaskCmd, &node) + devsRPS, err := nodeInspector.ExecCommandOnNode(context.TODO(), getRPSMaskCmd, &node) Expect(err).ToNot(HaveOccurred()) for _, devRPS := range strings.Split(devsRPS, "\n") { rpsCPUs, err := components.CPUMaskToCPUSet(devRPS) @@ -410,7 +411,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, node := range workerRTNodes { By("verify the systemd RPS service uses the correct RPS mask") cmd := []string{"sysctl", "-n", "net.core.rps_default_mask"} - rpsMaskContent, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + rpsMaskContent, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node) rpsMaskContent = strings.TrimSuffix(rpsMaskContent, "\n") rpsCPUs, err := components.CPUMaskToCPUSet(rpsMaskContent) @@ -427,7 +428,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { "-printf", "%p ", "-exec", "cat", "{}", ";", } - devsRPSContent, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + devsRPSContent, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node.Name) devsRPSMap := makeDevRPSMap(devsRPSContent) for path, mask := range devsRPSMap { @@ -446,7 +447,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { "-printf", "%p ", "-exec", "cat", "{}", ";", } - devsRPSContent, err = nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + devsRPSContent, err = nodeInspector.ExecCommandOnNode(context.TODO(), cmd, 
&node) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node.Name) devsRPSMap = makeDevRPSMap(devsRPSContent) @@ -463,7 +464,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, node := range workerRTNodes { // Verify the systemd RPS services were not created cmd := []string{"ls", "/rootfs/etc/systemd/system/update-rps@.service"} - _, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + _, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).To(HaveOccurred()) } } @@ -1035,7 +1036,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { It("[test_id:54083] Should have kernel param rcutree.kthread", func() { for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) + cmdline, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred(), "Failed to read /proc/cmdline") Expect(string(cmdline)).To(ContainSubstring("rcutree.kthread_prio=11"), "Boot Parameters should contain rctree.kthread_prio=11") } @@ -1325,7 +1326,7 @@ func execSysctlOnWorkers(ctx context.Context, workerNodes []corev1.Node, sysctlM for _, node := range workerNodes { for param, expected := range sysctlMap { By(fmt.Sprintf("executing the command \"sysctl -n %s\"", param)) - out, err = nodes.ExecCommandOnMachineConfigDaemon(ctx, &node, []string{"sysctl", "-n", param}) + out, err = nodeInspector.ExecCommandOnDaemon(ctx, &node, []string{"sysctl", "-n", param}) Expect(err).ToNot(HaveOccurred()) Expect(strings.TrimSpace(string(out))).Should(Equal(expected), "parameter %s value is not %s.", param, expected) } @@ -1340,7 +1341,7 @@ func checkSchedKnobs(ctx context.Context, workerNodes []corev1.Node, schedKnobs for param, expected := range schedKnobs { By(fmt.Sprintf("Checking scheduler knob %s", param)) knob := 
fmt.Sprintf("/rootfs/sys/kernel/debug/sched/%s", param) - out, err = nodes.ExecCommandOnMachineConfigDaemon(ctx, &node, []string{"cat", knob}) + out, err = nodeInspector.ExecCommandOnDaemon(ctx, &node, []string{"cat", knob}) Expect(err).ToNot(HaveOccurred()) Expect(strings.TrimSpace(string(out))).Should(Equal(expected), "parameter %s value is not %s.", param, expected) } diff --git a/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go b/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go index 6bc4ae2431..e103ae6d2e 100644 --- a/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go +++ b/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go @@ -10,6 +10,7 @@ import ( performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2" testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" ) @@ -65,7 +66,7 @@ var _ = Describe("[performance]RT Kernel", Ordered, func() { } cmd := []string{"uname", "-a"} - kernel, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &nonPerformancesWorkers[0]) + kernel, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &nonPerformancesWorkers[0]) Expect(err).ToNot(HaveOccurred(), "failed to execute uname") Expect(kernel).To(ContainSubstring("Linux"), "Node should have Linux string") diff --git a/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go b/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go index 3492e4486e..95af6e046c 100644 --- 
a/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go @@ -19,6 +19,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/events" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -813,7 +814,7 @@ func GetMemoryNodes(ctx context.Context, testPod *corev1.Pod, targetNode *corev1 } pid, err := nodes.ContainerPid(context.TODO(), targetNode, containerID) cmd := []string{"cat", fmt.Sprintf("/rootfs/proc/%s/cgroup", pid)} - out, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), targetNode, cmd) + out, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), targetNode, cmd) containerCgroup, err = cgroup.PidParser(out) fmt.Println("Container Cgroup = ", containerCgroup) cgroupv2, err := cgroup.IsVersion2(context.TODO(), testclient.Client) @@ -828,7 +829,7 @@ func GetMemoryNodes(ctx context.Context, testPod *corev1.Pod, targetNode *corev1 cpusetMemsPath = filepath.Join(fullPath, "cpuset.mems") } cmd = []string{"cat", cpusetMemsPath} - memoryNodes, err = nodes.ExecCommandOnNode(ctx, cmd, targetNode) + memoryNodes, err = nodeInspector.ExecCommandOnNode(ctx, cmd, targetNode) testlog.Infof("test pod %s with container id %s has Memory nodes %s", testPod.Name, 
containerID, memoryNodes) return memoryNodes, err } diff --git a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go index 76cb981a98..cc8d22b6a4 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go @@ -31,6 +31,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" ) @@ -48,10 +49,10 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance chkIrqbalance := []string{"cat", "/rootfs/etc/sysconfig/irqbalance"} chkCmdLineFn := func(ctx context.Context, node *corev1.Node) (string, error) { - return nodes.ExecCommandOnNode(ctx, chkCmdLine, node) + return nodeInspector.ExecCommandOnNode(ctx, chkCmdLine, node) } chkKubeletConfigFn := func(ctx context.Context, node *corev1.Node) (string, error) { - return nodes.ExecCommandOnNode(ctx, chkKubeletConfig, node) + return nodeInspector.ExecCommandOnNode(ctx, chkKubeletConfig, node) } chkHugepages2MFn := func(ctx context.Context, node *corev1.Node) (string, error) { @@ -166,7 +167,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance for _, node := range workerRTNodes { for i := 0; i < 2; i++ { 
nodeCmd := []string{"cat", hugepagesPathForNode(i, 2)} - result, err := nodes.ExecCommandOnNode(context.TODO(), nodeCmd, &node) + result, err := nodeInspector.ExecCommandOnNode(context.TODO(), nodeCmd, &node) Expect(err).ToNot(HaveOccurred()) t, err := strconv.Atoi(result) @@ -307,7 +308,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:28612]Verify that Kernel arguments can me updated (added, removed) thru performance profile", func() { for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnNode(context.TODO(), chkCmdLine, &node) + cmdline, err := nodeInspector.ExecCommandOnNode(context.TODO(), chkCmdLine, &node) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) // Verifying that new argument was added @@ -446,7 +447,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:28440]Verifies that nodeSelector can be updated in performance profile", func() { kubeletConfig, err := nodes.GetKubeletConfig(context.TODO(), newCnfNode) Expect(kubeletConfig.TopologyManagerPolicy).ToNot(BeEmpty()) - cmdline, err := nodes.ExecCommandOnNode(context.TODO(), chkCmdLine, newCnfNode) + cmdline, err := nodeInspector.ExecCommandOnNode(context.TODO(), chkCmdLine, newCnfNode) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) Expect(cmdline).To(ContainSubstring("tuned.non_isolcpus")) @@ -467,7 +468,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance err = nodes.HasPreemptRTKernel(context.TODO(), newCnfNode) Expect(err).To(HaveOccurred()) - cmdline, err := nodes.ExecCommandOnNode(context.TODO(), chkCmdLine, newCnfNode) + cmdline, err := nodeInspector.ExecCommandOnNode(context.TODO(), chkCmdLine, newCnfNode) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) Expect(cmdline).NotTo(ContainSubstring("tuned.non_isolcpus")) @@ -478,7 +479,7 @@ var _ = Describe("[rfe_id:28761][performance] 
Updating parameters in performance reservedCPU := string(*profile.Spec.CPU.Reserved) cpuMask, err := components.CPUListToHexMask(reservedCPU) Expect(err).ToNot(HaveOccurred(), "failed to list in Hex %s", reservedCPU) - irqBal, err := nodes.ExecCommandOnNode(context.TODO(), chkIrqbalance, newCnfNode) + irqBal, err := nodeInspector.ExecCommandOnNode(context.TODO(), chkIrqbalance, newCnfNode) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkIrqbalance) Expect(irqBal).NotTo(ContainSubstring(cpuMask)) }) @@ -558,7 +559,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance initialProfile = profile.DeepCopy() for _, node := range workerRTNodes { - onlineCPUCount, err := nodes.ExecCommandOnNode(context.TODO(), []string{"nproc", "--all"}, &node) + onlineCPUCount, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"nproc", "--all"}, &node) Expect(err).ToNot(HaveOccurred()) onlineCPUInt, err := strconv.Atoi(onlineCPUCount) @@ -602,7 +603,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + offlinedOutput, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) Expect(err).ToNot(HaveOccurred()) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlined)) @@ -670,7 +671,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() // Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + offlinedOutput, err := 
nodeInspector.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) Expect(err).ToNot(HaveOccurred()) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) @@ -734,7 +735,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + offlinedOutput, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) Expect(err).ToNot(HaveOccurred()) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) @@ -808,7 +809,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + offlinedOutput, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) Expect(err).ToNot(HaveOccurred()) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) @@ -924,7 +925,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance workerRTNodes = getUpdatedNodes() //Check offlined cpus are setting correctly for _, node := range workerRTNodes { - offlinedOutput, err := nodes.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) + offlinedOutput, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"cat", "/sys/devices/system/cpu/offline"}, &node) 
Expect(err).ToNot(HaveOccurred()) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) @@ -960,7 +961,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance findcmd := `find /sys/devices/system/cpu/cpu* -type f -name online -exec cat {} \;` checkCpuStatusCmd := []string{"bash", "-c", findcmd} for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandOnNode(context.TODO(), checkCpuStatusCmd, &node) + stdout, err := nodeInspector.ExecCommandOnNode(context.TODO(), checkCpuStatusCmd, &node) Expect(err).NotTo(HaveOccurred()) v := strings.Split(stdout, "\n") for _, val := range v { @@ -1006,7 +1007,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance var reserved, isolated []string var onlineCPUInt int for _, node := range workerRTNodes { - onlineCPUCount, err := nodes.ExecCommandOnNode(context.TODO(), []string{"nproc", "--all"}, &node) + onlineCPUCount, err := nodeInspector.ExecCommandOnNode(context.TODO(), []string{"nproc", "--all"}, &node) Expect(err).ToNot(HaveOccurred()) onlineCPUInt, err = strconv.Atoi(onlineCPUCount) Expect(err).ToNot(HaveOccurred()) @@ -1065,7 +1066,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance // Verify the systemd RPS service uses the correct RPS mask var maskContent string cmd := []string{"sysctl", "-n", "net.core.rps_default_mask"} - maskContent, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + maskContent, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node) rpsMaskContent := strings.Trim(maskContent, "\n") rpsCPUs, err := components.CPUMaskToCPUSet(rpsMaskContent) @@ -1081,7 +1082,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance "-name", "rps_cpus", "-exec", "cat", "{}", ";", } - devsRPS, err := 
nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + devsRPS, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).ToNot(HaveOccurred(), "failed to exec command %q on node %q", cmd, node.Name) for _, devRPS := range strings.Split(devsRPS, "\n") { rpsCPUs, err = components.CPUMaskToCPUSet(devRPS) @@ -1114,7 +1115,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance for _, node := range workerRTNodes { // Verify the systemd RPS services were not created cmd := []string{"ls", "/rootfs/etc/systemd/system/update-rps@.service"} - _, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + _, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).To(HaveOccurred()) } }) @@ -1141,7 +1142,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance } cmd := []string{"cat", "/rootfs/etc/crio/crio.conf.d/99-runtimes.conf"} for i := 0; i < len(workerRTNodes); i++ { - out, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[i]) + out, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[i]) Expect(err).ToNot(HaveOccurred(), "cannot get 99-runtimes.conf from %q", workerRTNodes[i].Name) By(fmt.Sprintf("checking node: %q", workerRTNodes[i].Name)) Expect(out).To(ContainSubstring("/bin/runc")) @@ -1173,7 +1174,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(ctrcfg.Spec.ContainerRuntimeConfig.DefaultRuntime == machineconfigv1.ContainerRuntimeDefaultRuntimeCrun).To(BeTrue()) cmd := []string{"cat", "/rootfs/etc/crio/crio.conf.d/99-runtimes.conf"} for i := 0; i < len(workerRTNodes); i++ { - out, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[i]) + out, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[i]) Expect(err).ToNot(HaveOccurred(), "cannot get 99-runtimes.conf from %q", workerRTNodes[i].Name) By(fmt.Sprintf("checking node: %q", 
workerRTNodes[i].Name)) Expect(out).To(ContainSubstring("/usr/bin/crun")) @@ -1214,7 +1215,7 @@ func countHugepagesOnNode(ctx context.Context, node *corev1.Node, sizeInMb int) count := 0 for i := 0; i < len(numaInfo); i++ { nodeCmd := []string{"cat", hugepagesPathForNode(i, sizeInMb)} - result, err := nodes.ExecCommandOnNode(ctx, nodeCmd, node) + result, err := nodeInspector.ExecCommandOnNode(ctx, nodeCmd, node) if err != nil { return 0, err } diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go index 5e95ac8de5..b96ca12315 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go +++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go @@ -31,6 +31,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/images" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -96,7 +97,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { It("[test_id:64097] Activation file is created", func() { cmd := []string{"ls", activation_file} for _, node := range workerRTNodes { - out, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + out, err := nodeInspector.ExecCommandOnNode(context.TODO(), 
cmd, &node) Expect(err).ToNot(HaveOccurred(), "file %s doesn't exist ", activation_file) Expect(out).To(Equal(activation_file)) } @@ -111,7 +112,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { pid, err := nodes.ContainerPid(ctx, workerRTNode, ctn) Expect(err).ToNot(HaveOccurred()) cmd := []string{"cat", fmt.Sprintf("/rootfs/proc/%s/cgroup", pid)} - out, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), workerRTNode, cmd) + out, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) cgroupPathOfPid, err := cgroup.PidParser(out) if isCgroupV2 { @@ -121,7 +122,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { containerCgroupPath = filepath.Join(controller, cgroupPathOfPid) } cmd = []string{"cat", fmt.Sprintf("%s", filepath.Join(containerCgroupPath, "/cpuset.cpus"))} - cpus, err := nodes.ExecCommandOnNode(ctx, cmd, workerRTNode) + cpus, err := nodeInspector.ExecCommandOnNode(ctx, cmd, workerRTNode) containerCpuset, err := cpuset.Parse(cpus) Expect(containerCpuset).To(Equal(onlineCPUSet), "Burstable pod containers cpuset.cpus do not match total online cpus") } @@ -158,7 +159,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { By("Checking Activation file") cmd := []string{"ls", activation_file} for _, node := range workerRTNodes { - out, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &node) + out, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &node) Expect(err).ToNot(HaveOccurred(), "file %s doesn't exist ", activation_file) Expect(out).To(Equal(activation_file)) } @@ -197,7 +198,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { pids, err := ovsPids(ctx, ovsSystemdServices, workerRTNode) Expect(err).ToNot(HaveOccurred(), "unable to fetch pid of ovs services") cmd := []string{"cat", fmt.Sprintf("/rootfs/proc/%s/cgroup", pids[0])} - out, err := 
nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), workerRTNode, cmd) + out, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), workerRTNode, cmd) Expect(err).ToNot(HaveOccurred()) ovsSliceCgroup, err = cgroup.PidParser(out) Expect(err).ToNot(HaveOccurred()) @@ -215,15 +216,15 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, func() { chkOvsCgrpProcs := func(node *corev1.Node) (string, error) { testlog.Info("Verify cgroup.procs is not empty") cmd := []string{"cat", cgroupProcs} - return nodes.ExecCommandOnNode(context.TODO(), cmd, node) + return nodeInspector.ExecCommandOnNode(context.TODO(), cmd, node) } chkOvsCgrpCpuset := func(node *corev1.Node) (string, error) { cmd := []string{"cat", cgroupCpusetCpus} - return nodes.ExecCommandOnNode(context.TODO(), cmd, node) + return nodeInspector.ExecCommandOnNode(context.TODO(), cmd, node) } chkOvsCgroupLoadBalance := func(node *corev1.Node) (string, error) { cmd := []string{"cat", cgroupLoadBalance} - return nodes.ExecCommandOnNode(context.TODO(), cmd, node) + return nodeInspector.ExecCommandOnNode(context.TODO(), cmd, node) } It("[test_id:64098] Verify cgroup layout on worker node", func() { @@ -552,7 +553,7 @@ func cpuSpecToString(cpus *performancev2.CPU) string { // checkCpuCount check if the node has sufficient cpus func checkCpuCount(ctx context.Context, workerNode *corev1.Node) { - onlineCPUCount, err := nodes.ExecCommandOnNode(ctx, []string{"nproc", "--all"}, workerNode) + onlineCPUCount, err := nodeInspector.ExecCommandOnNode(ctx, []string{"nproc", "--all"}, workerNode) if err != nil { Fail(fmt.Sprintf("Failed to fetch online CPUs: %v", err)) } @@ -621,7 +622,7 @@ func getCPUMaskForPids(ctx context.Context, pidList []string, targetNode *corev1 for _, pid := range pidList { cmd := []string{"taskset", "-pc", pid} - cpumask, err := nodes.ExecCommandOnNode(ctx, cmd, targetNode) + cpumask, err := nodeInspector.ExecCommandOnNode(ctx, cmd, targetNode) if err != nil { return nil, 
fmt.Errorf("failed to fetch cpus of %s: %s", pid, err) } @@ -733,7 +734,7 @@ func ovsPids(ctx context.Context, ovsSystemdServices []string, workerRTNode *cor // taskSet returns cpus used by the pid func taskSet(ctx context.Context, pid string, workerRTNode *corev1.Node) cpuset.CPUSet { cmd := []string{"taskset", "-pc", pid} - output, err := nodes.ExecCommandOnNode(ctx, cmd, workerRTNode) + output, err := nodeInspector.ExecCommandOnNode(ctx, cmd, workerRTNode) Expect(err).ToNot(HaveOccurred(), "unable to fetch cpus using taskset") tasksetOutput := strings.Split(strings.TrimSpace(output), ":") cpus := strings.TrimSpace(tasksetOutput[1]) diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go index f13cb25818..6e44d874a5 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go +++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go @@ -25,6 +25,7 @@ import ( testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" ) @@ -81,7 +82,7 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord } kubeletArguments := []string{"/bin/bash", "-c", "ps -ef | grep kubelet | grep config"} for _, node := range workerRTNodes { - stdout, err := 
nodes.ExecCommandOnNode(context.TODO(), kubeletArguments, &node) + stdout, err := nodeInspector.ExecCommandOnNode(context.TODO(), kubeletArguments, &node) Expect(err).ToNot(HaveOccurred()) Expect(strings.Contains(stdout, "300Mi")).To(BeTrue()) } @@ -215,7 +216,7 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord Expect(kubeletConfig.ImageMinimumGCAge.Seconds()).ToNot(Equal(180)) } for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandOnNode(context.TODO(), kubeletArguments, &node) + stdout, err := nodeInspector.ExecCommandOnNode(context.TODO(), kubeletArguments, &node) Expect(err).ToNot(HaveOccurred()) Expect(strings.Contains(stdout, "300Mi")).To(BeTrue()) } diff --git a/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go b/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go index ad739e22c8..0d4a696b13 100644 --- a/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go +++ b/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go @@ -33,6 +33,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" @@ -333,7 +334,7 @@ var _ = 
Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) By("Verifying node kernel arguments") - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &workerRTNodes[0], []string{"cat", "/proc/cmdline"}) + cmdline, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &workerRTNodes[0], []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) Expect(cmdline).To(ContainSubstring("intel_pstate=passive")) Expect(cmdline).ToNot(ContainSubstring("intel_pstate=disable")) @@ -703,7 +704,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific containerCgroup := "" pid, err := nodes.ContainerPid(context.TODO(), &workerRTNodes[0], containerID) cmd := []string{"cat", fmt.Sprintf("/rootfs/proc/%s/cgroup", pid)} - out, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &workerRTNodes[0], cmd) + out, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &workerRTNodes[0], cmd) containerCgroup, err = cgroup.PidParser(out) cgroupv2, err := cgroup.IsVersion2(context.TODO(), testclient.Client) Expect(err).ToNot(HaveOccurred()) @@ -716,7 +717,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific testlog.Infof("test pod %s with container id %s cgroup path %s", testpod.Name, containerID, cpusetCpusPath) By("Verify powersetting of cpus used by the pod") cmd = []string{"cat", cpusetCpusPath} - output, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[0]) + output, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[0]) Expect(err).ToNot(HaveOccurred()) cpus, err := cpuset.Parse(output) targetCpus := cpus.List() @@ -805,7 +806,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific pid, err := nodes.ContainerPid(context.TODO(), &workerRTNodes[0], containerID) cmd := []string{"cat", 
fmt.Sprintf("/rootfs/proc/%s/cgroup", pid)} - out, err := nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), &workerRTNodes[0], cmd) + out, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), &workerRTNodes[0], cmd) containerCgroup, err = cgroup.PidParser(out) cgroupv2, err := cgroup.IsVersion2(context.TODO(), testclient.Client) Expect(err).ToNot(HaveOccurred()) @@ -818,7 +819,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific testlog.Infof("test pod %s with container id %s cgroup path %s", testpod.Name, containerID, cpusetCpusPath) By("Verify powersetting of cpus used by the pod") cmd = []string{"cat", cpusetCpusPath} - output, err := nodes.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[0]) + output, err := nodeInspector.ExecCommandOnNode(context.TODO(), cmd, &workerRTNodes[0]) Expect(err).ToNot(HaveOccurred()) cpus, err := cpuset.Parse(output) targetCpus := cpus.List() @@ -922,13 +923,13 @@ func deleteTestPod(ctx context.Context, testpod *corev1.Pod) { func checkCpuGovernorsAndResumeLatency(ctx context.Context, cpus []int, targetNode *corev1.Node, pm_qos string, governor string) error { for _, cpu := range cpus { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/power/pm_qos_resume_latency_us", cpu)} - output, err := nodes.ExecCommandOnNode(ctx, cmd, targetNode) + output, err := nodeInspector.ExecCommandOnNode(ctx, cmd, targetNode) if err != nil { return err } Expect(output).To(Equal(pm_qos)) cmd = []string{"/bin/bash", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", cpu)} - output, err = nodes.ExecCommandOnNode(ctx, cmd, targetNode) + output, err = nodeInspector.ExecCommandOnNode(ctx, cmd, targetNode) if err != nil { return err } @@ -947,7 +948,7 @@ func checkHardwareCapability(ctx context.Context, workerRTNodes []corev1.Node) { Skip(fmt.Sprintf("This test need 2 NUMA nodes.The number of NUMA nodes on node %s < 2", node.Name)) } // Additional check 
so that test gets skipped on vm with fake numa - onlineCPUCount, err := nodes.ExecCommandOnNode(ctx, []string{"nproc", "--all"}, &node) + onlineCPUCount, err := nodeInspector.ExecCommandOnNode(ctx, []string{"nproc", "--all"}, &node) Expect(err).ToNot(HaveOccurred()) onlineCPUInt, err := strconv.Atoi(onlineCPUCount) Expect(err).ToNot(HaveOccurred()) diff --git a/test/e2e/performanceprofile/functests/9_reboot/devices.go b/test/e2e/performanceprofile/functests/9_reboot/devices.go index ef95d2fb72..ef2d1dfbba 100644 --- a/test/e2e/performanceprofile/functests/9_reboot/devices.go +++ b/test/e2e/performanceprofile/functests/9_reboot/devices.go @@ -19,6 +19,7 @@ import ( testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" testnodes "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" testpods "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" ) @@ -377,7 +378,7 @@ func waitForNodeReadyOrFail(tag, nodeName string, timeout, polling time.Duration func runCommandOnNodeThroughMCD(ctx context.Context, node *corev1.Node, description, command string) (string, error) { testlog.Infof("node %q: before %s", node.Name, description) - out, err := testnodes.ExecCommandOnMachineConfigDaemon(ctx, node, []string{"sh", "-c", command}) + out, err := nodeInspector.ExecCommandOnDaemon(ctx, node, []string{"sh", "-c", command}) testlog.Infof("node %q: output=[%s]", node.Name, string(out)) testlog.Infof("node %q: after %s", node.Name, description) return 
string(out), err diff --git a/test/e2e/performanceprofile/functests/Z_deconfig/deconfig.go b/test/e2e/performanceprofile/functests/Z_deconfig/deconfig.go index 3b0ba3b305..a001ade535 100644 --- a/test/e2e/performanceprofile/functests/Z_deconfig/deconfig.go +++ b/test/e2e/performanceprofile/functests/Z_deconfig/deconfig.go @@ -9,8 +9,8 @@ import ( testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" - nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/namespaces" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" ) // This test suite is designed to perform cleanup actions that should occur after all test suites have been executed. 
diff --git a/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go b/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go index d2d12ac488..94431a1a66 100644 --- a/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go +++ b/test/e2e/performanceprofile/functests/utils/cgroup/runtime/runtime.go @@ -6,10 +6,9 @@ import ( "path/filepath" corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" ) const ( @@ -32,7 +31,7 @@ func GetContainerRuntimeTypeFor(ctx context.Context, c client.Client, pod *corev "-c", fmt.Sprintf("/bin/awk -F '\"' '/runtime_path.*/ { print $2 }' %s", CRIORuntimeConfigFile), } - out, err := nodes.ExecCommandOnNode(ctx, cmd, node) + out, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) if err != nil { return "", fmt.Errorf("failed to execute command on node; cmd=%q node=%q err=%v", cmd, node.Name, err) } diff --git a/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go b/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go index a36e8fcef8..cb1a7eb532 100644 --- a/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go +++ b/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go @@ -6,7 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" ) // IsVM checks if a given node's underlying infrastructure is a VM @@ -16,7 +16,7 @@ func IsVM(node *corev1.Node) (bool, error) { "-c", "systemd-detect-virt > /dev/null ; echo $?", } - output, err := 
nodes.ExecCommandOnMachineConfigDaemon(context.TODO(), node, cmd) + output, err := nodeInspector.ExecCommandOnDaemon(context.TODO(), node, cmd) if err != nil { return false, err } diff --git a/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go b/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go index ad6bb2e69c..12eccde7c2 100644 --- a/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go +++ b/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go @@ -2,6 +2,8 @@ package node_inspector import ( "context" + "fmt" + "strings" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -9,10 +11,16 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" + testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" + testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/daemonset" + testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" + testpods "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" ) const serviceAccountSuffix = "sa" @@ -66,10 +74,55 @@ func Delete(cli client.Client, namespace, name string) error { return nil } -func IsRunning(cli client.Client, namespace, name string) (bool,error){ +func isRunning(cli client.Client, namespace, name string) (bool, error) { return daemonset.IsRunning(cli, namespace, name) } +// getDaemonPodByNode returns the daemon pod that runs on the specified node +func getDaemonPodByNode(node *corev1.Node) 
(*corev1.Pod, error) { + listOptions := &client.ListOptions{ + Namespace: testutils.NodeInspectorNamespace, + FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}), + LabelSelector: labels.SelectorFromSet(labels.Set{"name": testutils.NodeInspectorName}), + } + + pods := &corev1.PodList{} + if err := testclient.DataPlaneClient.List(context.TODO(), pods, listOptions); err != nil { + return nil, err + } + if len(pods.Items) < 1 { + return nil, fmt.Errorf("failed to get daemon pod for the node %q", node.Name) + } + return &pods.Items[0], nil +} + +// ExecCommandOnDaemon returns the output of the command execution on the node inspector daemon pod that runs on the specified node +func ExecCommandOnDaemon(ctx context.Context, node *corev1.Node, command []string) ([]byte, error) { + // Ensure the node inspector is running as we are counting on it + ok, err := isRunning(testclient.DataPlaneClient, testutils.NodeInspectorNamespace, testutils.NodeInspectorName) + if err != nil || !ok { + return nil, err + } + pod, err := getDaemonPodByNode(node) + if err != nil { + return nil, err + } + testlog.Infof("found daemon pod %s for node %s", pod.Name, node.Name) + + return testpods.WaitForPodOutput(ctx, testclient.K8sClient, pod, command) +} + +// ExecCommandOnNode executes given command on given node and returns the result +func ExecCommandOnNode(ctx context.Context, cmd []string, node *corev1.Node) (string, error) { + out, err := ExecCommandOnDaemon(ctx, node, cmd) + if err != nil { + return "", err + } + + trimmedString := strings.Trim(string(out), "\n") + return strings.ReplaceAll(trimmedString, "\r", ""), nil +} + func createDaemonSet(name, namespace, serviceAccountName, image string) *appsv1.DaemonSet { MountPropagationHostToContainer := corev1.MountPropagationHostToContainer return &appsv1.DaemonSet{ diff --git a/test/e2e/performanceprofile/functests/utils/nodes/nodes.go b/test/e2e/performanceprofile/functests/utils/nodes/nodes.go index 
12d0fcfad1..a96eee6cdf 100644 --- a/test/e2e/performanceprofile/functests/utils/nodes/nodes.go +++ b/test/e2e/performanceprofile/functests/utils/nodes/nodes.go @@ -28,7 +28,7 @@ import ( testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster" testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log" - testpods "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" ) const ( @@ -134,51 +134,10 @@ func GetNonPerformancesWorkers(nodeSelectorLabels map[string]string) ([]corev1.N return nonPerformanceWorkerNodes, err } -// GetMachineConfigDaemonByNode returns the machine-config-daemon pod that runs on the specified node -func GetMachineConfigDaemonByNode(node *corev1.Node) (*corev1.Pod, error) { - listOptions := &client.ListOptions{ - Namespace: testutils.NamespaceMachineConfigOperator, - FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}), - LabelSelector: labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}), - } - - mcds := &corev1.PodList{} - if err := testclient.Client.List(context.TODO(), mcds, listOptions); err != nil { - return nil, err - } - - if len(mcds.Items) < 1 { - return nil, fmt.Errorf("failed to get machine-config-daemon pod for the node %q", node.Name) - } - return &mcds.Items[0], nil -} - -// ExecCommandOnMachineConfigDaemon returns the output of the command execution on the machine-config-daemon pod that runs on the specified node -func ExecCommandOnMachineConfigDaemon(ctx context.Context, node *corev1.Node, command []string) ([]byte, error) { - mcd, err := GetMachineConfigDaemonByNode(node) 
- if err != nil { - return nil, err - } - testlog.Infof("found mcd %s for node %s", mcd.Name, node.Name) - - return testpods.WaitForPodOutput(ctx, testclient.K8sClient, mcd, command) -} - -// ExecCommandOnNode executes given command on given node and returns the result -func ExecCommandOnNode(ctx context.Context, cmd []string, node *corev1.Node) (string, error) { - out, err := ExecCommandOnMachineConfigDaemon(ctx, node, cmd) - if err != nil { - return "", err - } - - trimmedString := strings.Trim(string(out), "\n") - return strings.ReplaceAll(trimmedString, "\r", ""), nil -} - // GetKubeletConfig returns KubeletConfiguration loaded from the node /etc/kubernetes/kubelet.conf func GetKubeletConfig(ctx context.Context, node *corev1.Node) (*kubeletconfigv1beta1.KubeletConfiguration, error) { command := []string{"cat", path.Join("/rootfs", testutils.FilePathKubeletConfig)} - kubeletBytes, err := ExecCommandOnMachineConfigDaemon(ctx, node, command) + kubeletBytes, err := nodeInspector.ExecCommandOnDaemon(ctx, node, command) if err != nil { return nil, err } @@ -236,12 +195,12 @@ func HasPreemptRTKernel(ctx context.Context, node *corev1.Node) error { // with rpm-ostree rpm -q is telling you what you're booted into always, // because ostree binds together (kernel, userspace) as a single commit. 
cmd := []string{"chroot", "/rootfs", "rpm", "-q", "kernel-rt-core"} - if _, err := ExecCommandOnNode(ctx, cmd, node); err != nil { + if _, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node); err != nil { return err } cmd = []string{"/bin/bash", "-c", "cat /rootfs/sys/kernel/realtime"} - out, err := ExecCommandOnNode(ctx, cmd, node) + out, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) if err != nil { return err } @@ -255,7 +214,7 @@ func HasPreemptRTKernel(ctx context.Context, node *corev1.Node) error { func GetDefaultSmpAffinityRaw(ctx context.Context, node *corev1.Node) (string, error) { cmd := []string{"cat", "/proc/irq/default_smp_affinity"} - return ExecCommandOnNode(ctx, cmd, node) + return nodeInspector.ExecCommandOnNode(ctx, cmd, node) } // GetDefaultSmpAffinitySet returns the default smp affinity mask for the node @@ -275,7 +234,7 @@ func GetDefaultSmpAffinitySet(ctx context.Context, node *corev1.Node) (cpuset.CP // GetOnlineCPUsSet returns the list of online (being scheduled) CPUs on the node func GetOnlineCPUsSet(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, error) { command := []string{"cat", sysDevicesOnlineCPUs} - onlineCPUs, err := ExecCommandOnNode(ctx, command, node) + onlineCPUs, err := nodeInspector.ExecCommandOnNode(ctx, command, node) if err != nil { return cpuset.New(), err } @@ -286,7 +245,7 @@ func GetOnlineCPUsSet(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, er // Use a random cpuID from the return value of GetOnlineCPUsSet if not sure func GetSMTLevel(ctx context.Context, cpuID int, node *corev1.Node) int { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/topology/thread_siblings_list | tr -d \"\n\r\"", cpuID)} - threadSiblingsList, err := ExecCommandOnNode(ctx, cmd, node) + threadSiblingsList, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) ExpectWithOffset(1, err).ToNot(HaveOccurred()) // how many thread sibling you have = SMT level // example: 2-way SMT means 2 
threads sibling for each thread @@ -298,7 +257,7 @@ func GetSMTLevel(ctx context.Context, cpuID int, node *corev1.Node) int { // GetNumaNodes returns the number of numa nodes and the associated cpus as list on the node func GetNumaNodes(ctx context.Context, node *corev1.Node) (map[int][]int, error) { lscpuCmd := []string{"lscpu", "-e=node,core,cpu", "-J"} - cmdout, err := ExecCommandOnNode(ctx, lscpuCmd, node) + cmdout, err := nodeInspector.ExecCommandOnNode(ctx, lscpuCmd, node) var numaNode, cpu int if err != nil { return nil, err @@ -324,7 +283,7 @@ func GetNumaNodes(ctx context.Context, node *corev1.Node) (map[int][]int, error) // GetCoreSiblings returns the siblings of core per numa node func GetCoreSiblings(ctx context.Context, node *corev1.Node) (map[int]map[int][]int, error) { lscpuCmd := []string{"lscpu", "-e=node,core,cpu", "-J"} - out, err := ExecCommandOnNode(ctx, lscpuCmd, node) + out, err := nodeInspector.ExecCommandOnNode(ctx, lscpuCmd, node) var result NumaNodes var numaNode, core, cpu int coreSiblings := make(map[int]map[int][]int) @@ -459,17 +418,17 @@ func GetNumaRanges(cpuString string) string { func GetNodeInterfaces(ctx context.Context, node corev1.Node) ([]NodeInterface, error) { var nodeInterfaces []NodeInterface listNetworkInterfacesCmd := []string{"/bin/sh", "-c", fmt.Sprintf("ls -l /sys/class/net")} - networkInterfaces, err := ExecCommandOnMachineConfigDaemon(ctx, &node, listNetworkInterfacesCmd) + networkInterfaces, err := nodeInspector.ExecCommandOnDaemon(ctx, &node, listNetworkInterfacesCmd) if err != nil { return nil, err } ipLinkShowCmd := []string{"ip", "link", "show"} - interfaceLinksStatus, err := ExecCommandOnMachineConfigDaemon(ctx, &node, ipLinkShowCmd) + interfaceLinksStatus, err := nodeInspector.ExecCommandOnDaemon(ctx, &node, ipLinkShowCmd) if err != nil { return nil, err } defaultRouteCmd := []string{"ip", "route", "show", "0.0.0.0/0"} - defaultRoute, err := ExecCommandOnMachineConfigDaemon(ctx, &node, defaultRouteCmd) + 
defaultRoute, err := nodeInspector.ExecCommandOnDaemon(ctx, &node, defaultRouteCmd) if err != nil { return nil, err } @@ -535,7 +494,7 @@ func ContainerPid(ctx context.Context, node *corev1.Node, containerId string) (s var cridata = []byte{} Eventually(func() []byte { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("chroot /rootfs crictl inspect %s", containerId)} - cridata, err = ExecCommandOnMachineConfigDaemon(ctx, node, cmd) + cridata, err = nodeInspector.ExecCommandOnDaemon(ctx, node, cmd) Expect(err).ToNot(HaveOccurred(), "failed to run %s cmd", cmd) return cridata }, 10*time.Second, 5*time.Second).ShouldNot(BeEmpty()) diff --git a/test/e2e/performanceprofile/functests/utils/systemd/systemd.go b/test/e2e/performanceprofile/functests/utils/systemd/systemd.go index f45fd86cfc..b827e71674 100644 --- a/test/e2e/performanceprofile/functests/utils/systemd/systemd.go +++ b/test/e2e/performanceprofile/functests/utils/systemd/systemd.go @@ -4,18 +4,18 @@ import ( "context" "fmt" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" corev1 "k8s.io/api/core/v1" ) func Status(ctx context.Context, unitfile string, node *corev1.Node) (string, error) { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("chroot /rootfs systemctl status %s --lines=0 --no-pager", unitfile)} - out, err := nodes.ExecCommandOnMachineConfigDaemon(ctx, node, cmd) + out, err := nodeInspector.ExecCommandOnDaemon(ctx, node, cmd) return string(out), err } func ShowProperty(ctx context.Context, unitfile string, property string, node *corev1.Node) (string, error) { cmd := []string{"/bin/bash", "-c", fmt.Sprintf("chroot /rootfs systemctl show -p %s %s --no-pager", property, unitfile)} - out, err := nodes.ExecCommandOnMachineConfigDaemon(ctx, node, cmd) + out, err := nodeInspector.ExecCommandOnDaemon(ctx, node, cmd) 
return string(out), err } diff --git a/test/e2e/performanceprofile/functests/utils/tuned/tuned.go b/test/e2e/performanceprofile/functests/utils/tuned/tuned.go index 75ecec0f8c..0c3b6299c5 100644 --- a/test/e2e/performanceprofile/functests/utils/tuned/tuned.go +++ b/test/e2e/performanceprofile/functests/utils/tuned/tuned.go @@ -21,6 +21,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components" testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" + nodeInspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" ) @@ -47,7 +48,7 @@ func GetPod(ctx context.Context, node *corev1.Node) (*corev1.Pod, error) { func WaitForStalldTo(ctx context.Context, run bool, interval, timeout time.Duration, node *corev1.Node) error { return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) { cmd := []string{"/bin/bash", "-c", "pidof stalld || echo \"stalld not running\""} - stalldPid, err := nodes.ExecCommandOnNode(ctx, cmd, node) + stalldPid, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) if err != nil { klog.Errorf("failed to execute command %q on node: %q; %v", cmd, node.Name, err) return false, err @@ -70,7 +71,7 @@ func WaitForStalldTo(ctx context.Context, run bool, interval, timeout time.Durat func CheckParameters(ctx context.Context, node *corev1.Node, sysctlMap map[string]string, kernelParameters []string, stalld, rtkernel bool) { cmd := []string{"/bin/bash", "-c", "pidof stalld || echo \"stalld not running\""} By(fmt.Sprintf("Executing %q", cmd)) - stalldPid, err := 
nodes.ExecCommandOnNode(ctx, cmd, node) + stalldPid, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q; %v", cmd, node.Name, err) _, err = strconv.Atoi(stalldPid) @@ -102,14 +103,14 @@ func CheckParameters(ctx context.Context, node *corev1.Node, sysctlMap map[strin for param, expected := range sysctlMap { cmd = []string{"sysctl", "-n", param} By(fmt.Sprintf("Executing %q", cmd)) - out, err := nodes.ExecCommandOnNode(ctx, cmd, node) + out, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) ExpectWithOffset(1, out).Should(Equal(expected), "parameter %s value is not %s", param, expected) } cmd = []string{"cat", "/proc/cmdline"} By(fmt.Sprintf("Executing %q", cmd)) - cmdline, err := nodes.ExecCommandOnNode(ctx, cmd, node) + cmdline, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) for _, param := range kernelParameters { @@ -119,7 +120,7 @@ func CheckParameters(ctx context.Context, node *corev1.Node, sysctlMap map[strin if !rtkernel { cmd = []string{"uname", "-a"} By(fmt.Sprintf("Executing %q", cmd)) - kernel, err := nodes.ExecCommandOnNode(ctx, cmd, node) + kernel, err := nodeInspector.ExecCommandOnNode(ctx, cmd, node) ExpectWithOffset(1, err).ToNot(HaveOccurred(), "failed to execute command %q on node: %q", cmd, node.Name) ExpectWithOffset(1, kernel).To(ContainSubstring("Linux"), "kernel should be Linux")