diff --git a/connectivity/check/check.go b/connectivity/check/check.go
index a2ae564f44..934d90efb7 100644
--- a/connectivity/check/check.go
+++ b/connectivity/check/check.go
@@ -45,6 +45,7 @@ type Parameters struct {
 	PerfCRR          bool
 	PerfHostNet      bool
 	PerfSamples      int
+	PerfLatency      bool
 	CurlImage        string
 	PerformanceImage string
 	JSONMockImage    string
diff --git a/connectivity/check/context.go b/connectivity/check/context.go
index 62a77ef5e4..5000db8fc5 100644
--- a/connectivity/check/context.go
+++ b/connectivity/check/context.go
@@ -96,6 +96,7 @@ type PerfResult struct {
 	Samples  int
 	Values   []float64
 	Avg      float64
+	Latency  map[string][]float64
 }
 
 // verbose returns the value of the user-provided verbosity flag.
@@ -553,16 +554,35 @@ func (ct *ConnectivityTest) report() error {
 	}
 
 	if ct.params.Perf {
-		// Report Performance results
-		ct.Headerf("🔥 Performance Test Summary: ")
-		ct.Logf("%s", strings.Repeat("-", 145))
-		ct.Logf("📋 %-15s | %-50s | %-15s | %-15s | %-15s | %-15s", "Scenario", "Pod", "Test", "Num Samples", "Duration", "Avg value")
-		ct.Logf("%s", strings.Repeat("-", 145))
-		for p, d := range ct.PerfResults {
-			ct.Logf("📋 %-15s | %-50s | %-15s | %-15d | %-15s | %.2f (%s)", d.Scenario, p.Pod, p.Test, d.Samples, d.Duration, d.Avg, d.Metric)
-			ct.Debugf("Individual Values from run : %s", d.Values)
+		// Report Performance results for latency
+		if ct.params.PerfLatency {
+			ct.Headerf("🔥 Latency Test Summary: ")
+			ct.Logf("%s", strings.Repeat("-", 233))
+			ct.Logf("📋 %-15s | %-50s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s", "Scenario", "Pod", "Test", "Num Samples", "Duration", "Min", "Mean", "Max", "P50", "P90", "P99")
+			ct.Logf("%s", strings.Repeat("-", 233))
+			for p, d := range ct.PerfResults {
+				ct.Logf("📋 %-15s | %-50s | %-15s | %-15d | %-15s | %-12.2f %s | %-12.2f %s | %-12.2f %s | %-12.2f %s | %-12.2f %s | %-12.2f %s",
+					d.Scenario, p.Pod, p.Test, d.Samples, d.Duration,
+					d.Latency["minLatency"][0], d.Metric,
+					d.Latency["meanLatency"][0], d.Metric,
+					d.Latency["maxLatency"][0], d.Metric,
+					d.Latency["p50Latency"][0], d.Metric,
+					d.Latency["p90Latency"][0], d.Metric,
+					d.Latency["p99Latency"][0], d.Metric)
+			}
+			ct.Logf("%s", strings.Repeat("-", 233))
+		} else {
+			// Report Performance results for throughput
+			ct.Headerf("🔥 Performance Test Summary: ")
+			ct.Logf("%s", strings.Repeat("-", 145))
+			ct.Logf("📋 %-15s | %-50s | %-15s | %-15s | %-15s | %-15s", "Scenario", "Pod", "Test", "Num Samples", "Duration", "Avg value")
+			ct.Logf("%s", strings.Repeat("-", 145))
+			for p, d := range ct.PerfResults {
+				ct.Logf("📋 %-15s | %-50s | %-15s | %-15d | %-15s | %.2f (%s)", d.Scenario, p.Pod, p.Test, d.Samples, d.Duration, d.Avg, d.Metric)
+				ct.Debugf("Individual Values from run : %s", d.Values)
+			}
+			ct.Logf("%s", strings.Repeat("-", 145))
 		}
-		ct.Logf("%s", strings.Repeat("-", 145))
 	}
 
 	ct.Headerf("✅ All %d tests (%d actions) successful, %d tests skipped, %d scenarios skipped.", nt-nst, na, nst, nss)
diff --git a/connectivity/tests/perfpod.go b/connectivity/tests/perfpod.go
index a8454496ba..1b986e7eb2 100644
--- a/connectivity/tests/perfpod.go
+++ b/connectivity/tests/perfpod.go
@@ -31,6 +31,7 @@ type netPerfPodtoPod struct {
 }
 
 var netPerfRegex = regexp.MustCompile(`\s+\d+\s+\d+\s+(\d+|\S+)\s+(\S+|\d+)\s+(\S+)+\s+(\S+)?`)
+var netPerfRegexLatency = regexp.MustCompile(`(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?)`)
 
 func (s *netPerfPodtoPod) Name() string {
 	tn := "perf-pod-to-pod"
@@ -44,6 +45,7 @@ func (s *netPerfPodtoPod) Run(ctx context.Context, t *check.Test) {
 	samples := t.Context().Params().PerfSamples
 	duration := t.Context().Params().PerfDuration
 	crr := t.Context().Params().PerfCRR
+	latency := t.Context().Params().PerfLatency
 	for _, c := range t.Context().PerfClientPods() {
 		c := c
 		for _, server := range t.Context().PerfServerPod() {
@@ -57,7 +59,12 @@ func (s *netPerfPodtoPod) Run(ctx context.Context, t *check.Test) {
 			action.CollectFlows = false
 			action.Run(func(a *check.Action) {
 				if crr {
-					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_CRR", a, t.Context().PerfResults, 1, 30, scenarioName)
+					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_CRR", a, t.Context().PerfResults, samples, duration, scenarioName)
+				} else if latency {
+					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_RR_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
+					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_STREAM_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
+					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "UDP_RR_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
+					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "UDP_STREAM_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
 				} else {
 					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_RR", a, t.Context().PerfResults, samples, duration, scenarioName)
 					netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_STREAM", a, t.Context().PerfResults, samples, duration, scenarioName)
@@ -69,48 +76,134 @@
 	}
 }
 
+func buildExecCommand(test string, sip string, duration time.Duration, args []string) []string {
+	exec := []string{"/usr/local/bin/netperf", "-H", sip, "-l", duration.String(), "-t", test, "--", "-R", "1", "-m", fmt.Sprintf("%d", messageSize)}
+	exec = append(exec, args...)
+
+	return exec
+}
+
 func netperf(ctx context.Context, sip string, podname string, test string, a *check.Action, result map[check.PerfTests]check.PerfResult, samples int, duration time.Duration, scenarioName string) {
 	// Define test about to be executed and from which pod
 	k := check.PerfTests{
 		Pod:  podname,
 		Test: test,
 	}
-	metric := string("OP/s")
-	if strings.Contains(test, "STREAM") {
-		metric = "Mb/s"
-	}
-	exec := []string{"/usr/local/bin/netperf", "-H", sip, "-l", duration.String(), "-t", test, "--", "-R", "1", "-m", fmt.Sprintf("%d", messageSize)}
-	// recv socketsize send socketsize msg size|okmsg duration value
+	var metric string
 	values := []float64{}
-	// Result data
-	for i := 0; i < samples; i++ {
-		a.ExecInPod(ctx, exec)
-		d := netPerfRegex.FindStringSubmatch(a.CmdOutput())
-		if len(d) < 5 {
-			a.Fatal("Unable to process netperf result")
+	var res check.PerfResult
+
+	if strings.Contains(test, "LATENCY") {
+		test = strings.ReplaceAll(test, "_LATENCY", "")
+		k.Test = test
+		metric = string("μs")
+		latency := make(map[string][]float64)
+
+		args := []string{"-o", "min_latency,mean_latency,max_latency,P50_LATENCY,P90_LATENCY,P99_LATENCY"}
+		exec := buildExecCommand(test, sip, duration, args)
+
+		for i := 0; i < samples; i++ {
+			a.ExecInPod(ctx, exec)
+			d := netPerfRegexLatency.FindStringSubmatch(a.CmdOutput())
+
+			if len(d) != 7 {
+				a.Fatal("Unable to process netperf result")
+			}
+
+			minLatency, err := strconv.ParseFloat(d[1], 64)
+			if err != nil {
+				a.Fatal("Unable to parse netperf result")
+			}
+			latency["minLatency"] = append(latency["minLatency"], minLatency)
+
+			meanLatency, err := strconv.ParseFloat(d[2], 64)
+			if err != nil {
+				a.Fatal("Unable to parse netperf result")
+			}
+			latency["meanLatency"] = append(latency["meanLatency"], meanLatency)
+
+			maxLatency, err := strconv.ParseFloat(d[3], 64)
+			if err != nil {
+				a.Fatal("Unable to parse netperf result")
+			}
+			latency["maxLatency"] = append(latency["maxLatency"], maxLatency)
+
+			p50Latency, err := strconv.ParseFloat(d[4], 64)
+			if err != nil {
+				a.Fatal("Unable to parse netperf result")
+			}
+			latency["p50Latency"] = append(latency["p50Latency"], p50Latency)
+
+			p90Latency, err := strconv.ParseFloat(d[5], 64)
+			if err != nil {
+				a.Fatal("Unable to parse netperf result")
+			}
+			latency["p90Latency"] = append(latency["p90Latency"], p90Latency)
+
+			p99Latency, err := strconv.ParseFloat(d[6], 64)
+			if err != nil {
+				a.Fatal("Unable to parse netperf result")
+			}
+			latency["p99Latency"] = append(latency["p99Latency"], p99Latency)
 		}
-		nv := ""
-		if len(d[len(d)-1]) > 0 {
-			nv = d[len(d)-1]
-		} else {
-			nv = d[len(d)-2]
+
+		latency["minLatency"] = []float64{listAvg(latency["minLatency"])}
+		latency["meanLatency"] = []float64{listAvg(latency["meanLatency"])}
+		latency["maxLatency"] = []float64{listAvg(latency["maxLatency"])}
+		latency["p50Latency"] = []float64{listAvg(latency["p50Latency"])}
+		latency["p90Latency"] = []float64{listAvg(latency["p90Latency"])}
+		latency["p99Latency"] = []float64{listAvg(latency["p99Latency"])}
+
+		res = check.PerfResult{
+			Scenario: scenarioName,
+			Samples:  samples,
+			Metric:   metric,
+			Duration: duration,
+			Latency:  latency,
 		}
-		f, err := strconv.ParseFloat(nv, 64)
-		if err == nil {
-			values = append(values, f)
-		} else {
-			a.Fatal("Unable to parse netperf result")
+	} else {
+		metric = string("OP/s")
+		if strings.Contains(test, "STREAM") {
+			metric = "Mb/s"
 		}
+
+		exec := buildExecCommand(test, sip, duration, []string{})
+		// recv socketsize send socketsize msg size|okmsg duration value
+		// Result data
+		for i := 0; i < samples; i++ {
+			a.ExecInPod(ctx, exec)
+			d := netPerfRegex.FindStringSubmatch(a.CmdOutput())
+			if len(d) < 5 {
+				a.Fatal("Unable to process netperf result")
+			}
+			nv := ""
+			if len(d[len(d)-1]) > 0 {
+				nv = d[len(d)-1]
+			} else {
+				nv = d[len(d)-2]
+			}
+			f, err := strconv.ParseFloat(nv, 64)
+			if err == nil {
+				values = append(values, f)
+			} else {
+				a.Fatal("Unable to parse netperf result")
+			}
+		}
+
+		res = check.PerfResult{
+			Scenario: scenarioName,
+			Metric:   metric,
+			Duration: duration,
+			Values:   values,
+			Samples:  samples,
+			Avg:      listAvg(values),
+		}
 	}
-	res := check.PerfResult{
-		Scenario: scenarioName,
-		Metric:   metric,
-		Duration: duration,
-		Values:   values,
-		Samples:  samples,
-		Avg:      listAvg(values),
-	}
+
 	result[k] = res
 }
diff --git a/internal/cli/cmd/connectivity.go b/internal/cli/cmd/connectivity.go
index bb5ba9b8ca..bb4d15015a 100644
--- a/internal/cli/cmd/connectivity.go
+++ b/internal/cli/cmd/connectivity.go
@@ -158,6 +158,7 @@ func newCmdConnectivityTest(hooks Hooks) *cobra.Command {
 	cmd.Flags().IntVar(&params.PerfSamples, "perf-samples", 1, "Number of Performance samples to capture (how many times to run each test)")
 	cmd.Flags().BoolVar(&params.PerfCRR, "perf-crr", false, "Run Netperf CRR Test. --perf-samples and --perf-duration ignored")
 	cmd.Flags().BoolVar(&params.PerfHostNet, "host-net", false, "Use host networking during network performance tests")
+	cmd.Flags().BoolVar(&params.PerfLatency, "perf-latency", false, "Run network latency tests")
 	cmd.Flags().StringVar(&params.CurlImage, "curl-image", defaults.ConnectivityCheckAlpineCurlImage, "Image path to use for curl")
 	cmd.Flags().StringVar(&params.PerformanceImage, "performance-image", defaults.ConnectivityPerformanceImage, "Image path to use for performance")
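
Note for reviewers: with `-o min_latency,mean_latency,max_latency,P50_LATENCY,P90_LATENCY,P99_LATENCY`, netperf prints the selected statistics as a single comma-separated line, which is exactly what `netPerfRegexLatency` captures. A standalone sketch of the expected match shape (the sample output line below is invented, not real netperf output):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as netPerfRegexLatency: six comma-separated numeric fields.
var latencyRe = regexp.MustCompile(`(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?)`)

func main() {
	out := "23,110.53,4108,98,210,370" // hypothetical CSV line from netperf

	d := latencyRe.FindStringSubmatch(out)
	if len(d) != 7 { // full match plus six capture groups, as checked in netperf()
		fmt.Println("unable to process netperf result")
		return
	}
	fmt.Printf("min=%s mean=%s max=%s p50=%s p90=%s p99=%s (μs)\n",
		d[1], d[2], d[3], d[4], d[5], d[6])
}
```

Assuming the existing `--perf` gate is unchanged, the new path would be exercised with something like `cilium connectivity test --perf --perf-latency --perf-samples 3`.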
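
One possible follow-up, not part of this patch: the six near-identical `strconv.ParseFloat` blocks in `netperf()` could be table-driven. A hedged sketch, where `appendLatencies` is a hypothetical helper rather than code from this change:

```go
package main

import (
	"fmt"
	"strconv"
)

// appendLatencies parses the six capture groups of netPerfRegexLatency
// (d[0] is the full match) into the latency map, preserving field order.
func appendLatencies(latency map[string][]float64, d []string) error {
	stats := []string{"minLatency", "meanLatency", "maxLatency", "p50Latency", "p90Latency", "p99Latency"}
	for i, name := range stats {
		v, err := strconv.ParseFloat(d[i+1], 64)
		if err != nil {
			return fmt.Errorf("unable to parse netperf result %q: %w", d[i+1], err)
		}
		latency[name] = append(latency[name], v)
	}
	return nil
}

func main() {
	latency := make(map[string][]float64)
	// Hypothetical submatch slice: full match followed by six captures.
	d := []string{"23,110.53,4108,98,210,370", "23", "110.53", "4108", "98", "210", "370"}
	if err := appendLatencies(latency, d); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(latency["meanLatency"], latency["p99Latency"]) // [110.53] [370]
}
```

A design point worth noting: `listAvg` over per-sample percentiles reports the mean of each percentile across runs, which approximates rather than recomputes the true percentile over all samples.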