Skip to content

Commit

Permalink
Connectivity Test: Add latency measurement
Browse files Browse the repository at this point in the history
Signed-off-by: darox <[email protected]>
  • Loading branch information
darox committed Nov 8, 2023
1 parent 408b445 commit d074005
Show file tree
Hide file tree
Showing 4 changed files with 156 additions and 40 deletions.
1 change: 1 addition & 0 deletions connectivity/check/check.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ type Parameters struct {
PerfCRR bool
PerfHostNet bool
PerfSamples int
PerfLatency bool
CurlImage string
PerformanceImage string
JSONMockImage string
Expand Down
39 changes: 30 additions & 9 deletions connectivity/check/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ type PerfResult struct {
Samples int
Values []float64
Avg float64
Latency map[string][]float64
}

// verbose returns the value of the user-provided verbosity flag.
Expand Down Expand Up @@ -553,16 +554,36 @@ func (ct *ConnectivityTest) report() error {
}

if ct.params.Perf {
// Report Performance results
ct.Headerf("🔥 Performance Test Summary: ")
ct.Logf("%s", strings.Repeat("-", 145))
ct.Logf("📋 %-15s | %-50s | %-15s | %-15s | %-15s | %-15s", "Scenario", "Pod", "Test", "Num Samples", "Duration", "Avg value")
ct.Logf("%s", strings.Repeat("-", 145))
for p, d := range ct.PerfResults {
ct.Logf("📋 %-15s | %-50s | %-15s | %-15d | %-15s | %.2f (%s)", d.Scenario, p.Pod, p.Test, d.Samples, d.Duration, d.Avg, d.Metric)
ct.Debugf("Individual Values from run : %s", d.Values)

ct.Headerf("🔥 Latency Test Summary: ")

// Report Performance results for latency
if ct.params.PerfLatency {
ct.Logf("%s", strings.Repeat("-", 233))
ct.Logf("📋 %-15s | %-50s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s | %-15s", "Scenario", "Pod", "Test", "Num Samples", "Duration", "Min", "Mean", "Max", "P50", "P90", "P99")
ct.Logf("%s", strings.Repeat("-", 233))
for p, d := range ct.PerfResults {
ct.Logf("📋 %-15s | %-50s | %-15s | %-15d | %-15s | %-12.2f %s | %-12.2f %s | %-12.2f %s | %-12.2f %s | %-12.2f %s | %-12.2f %s",
d.Scenario, p.Pod, p.Test, d.Samples, d.Duration,
d.Latency["minLatency"][0], d.Metric,
d.Latency["meanLatency"][0], d.Metric,
d.Latency["maxLatency"][0], d.Metric,
d.Latency["p50Latency"][0], d.Metric,
d.Latency["p90Latency"][0], d.Metric,
d.Latency["p99Latency"][0], d.Metric)
}
ct.Logf("%s", strings.Repeat("-", 233))
} else {
ct.Logf("%s", strings.Repeat("-", 145))
// Report Performance results for throughput
ct.Logf("📋 %-15s | %-50s | %-15s | %-15s | %-15s | %-15s", "Scenario", "Pod", "Test", "Num Samples", "Duration", "Avg value")
ct.Logf("%s", strings.Repeat("-", 145))
for p, d := range ct.PerfResults {
ct.Logf("📋 %-15s | %-50s | %-15s | %-15d | %-15s | %.2f (%s)", d.Scenario, p.Pod, p.Test, d.Samples, d.Duration, d.Avg, d.Metric)
ct.Debugf("Individual Values from run : %s", d.Values)
}
ct.Logf("%s", strings.Repeat("-", 145))
}
ct.Logf("%s", strings.Repeat("-", 145))
}

ct.Headerf("✅ All %d tests (%d actions) successful, %d tests skipped, %d scenarios skipped.", nt-nst, na, nst, nss)
Expand Down
155 changes: 124 additions & 31 deletions connectivity/tests/perfpod.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ type netPerfPodtoPod struct {
}

var netPerfRegex = regexp.MustCompile(`\s+\d+\s+\d+\s+(\d+|\S+)\s+(\S+|\d+)\s+(\S+)+\s+(\S+)?`)
var netPerfRegexLatency = regexp.MustCompile(`(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?),(\d+(?:\.\d+)?)`)

func (s *netPerfPodtoPod) Name() string {
tn := "perf-pod-to-pod"
Expand All @@ -44,6 +45,7 @@ func (s *netPerfPodtoPod) Run(ctx context.Context, t *check.Test) {
samples := t.Context().Params().PerfSamples
duration := t.Context().Params().PerfDuration
crr := t.Context().Params().PerfCRR
latency := t.Context().Params().PerfLatency
for _, c := range t.Context().PerfClientPods() {
c := c
for _, server := range t.Context().PerfServerPod() {
Expand All @@ -57,7 +59,12 @@ func (s *netPerfPodtoPod) Run(ctx context.Context, t *check.Test) {
action.CollectFlows = false
action.Run(func(a *check.Action) {
if crr {
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_CRR", a, t.Context().PerfResults, 1, 30, scenarioName)
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_CRR", a, t.Context().PerfResults, samples, duration, scenarioName)
} else if latency {
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_RR_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_STREAM_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "UDP_RR_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "UDP_STREAM_LATENCY", a, t.Context().PerfResults, samples, duration, scenarioName)
} else {
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_RR", a, t.Context().PerfResults, samples, duration, scenarioName)
netperf(ctx, server.Pod.Status.PodIP, c.Pod.Name, "TCP_STREAM", a, t.Context().PerfResults, samples, duration, scenarioName)
Expand All @@ -69,48 +76,134 @@ func (s *netPerfPodtoPod) Run(ctx context.Context, t *check.Test) {
}
}

// buildExecCommand assembles the netperf command line for the given test
// type, target server IP, and run duration, appending any extra
// test-specific netperf arguments (e.g. output selectors) after the
// standard options.
func buildExecCommand(test string, sip string, duration time.Duration, args []string) []string {
	cmd := []string{
		"/usr/local/bin/netperf",
		"-H", sip,
		"-l", duration.String(),
		"-t", test,
		"--",
		"-R", "1",
		"-m", fmt.Sprintf("%d", messageSize),
	}
	return append(cmd, args...)
}

// netperf runs a single netperf test `samples` times from the pod named
// podname against the server at sip, parses the tool's output, averages
// the collected values, and stores the aggregate under the (pod, test)
// key in result.
//
// Tests whose name carries the "_LATENCY" suffix are executed with the
// netperf omni output selectors (min/mean/max/P50/P90/P99 latency in μs);
// the suffix is stripped before recording the result. All other tests
// record a single throughput/transaction value per sample (Mb/s for
// STREAM tests, OP/s otherwise).
func netperf(ctx context.Context, sip string, podname string, test string, a *check.Action, result map[check.PerfTests]check.PerfResult, samples int, duration time.Duration, scenarioName string) {
	// Define test about to be executed and from which pod.
	k := check.PerfTests{
		Pod:  podname,
		Test: test,
	}
	var metric string
	var res check.PerfResult

	if strings.Contains(test, "LATENCY") {
		// Latency variant: drop the pseudo-suffix so netperf sees the
		// real test name, and record the stripped name as well.
		test = strings.ReplaceAll(test, "_LATENCY", "")
		k.Test = test
		metric = "μs"

		// Keys are listed in the same order as the "-o" output
		// selectors, so capture group i+1 maps to keys[i].
		keys := []string{"minLatency", "meanLatency", "maxLatency", "p50Latency", "p90Latency", "p99Latency"}
		latency := make(map[string][]float64, len(keys))

		args := []string{"-o", "min_latency,mean_latency,max_latency,P50_LATENCY,P90_LATENCY,P99_LATENCY"}
		exec := buildExecCommand(test, sip, duration, args)

		for i := 0; i < samples; i++ {
			a.ExecInPod(ctx, exec)
			d := netPerfRegexLatency.FindStringSubmatch(a.CmdOutput())
			// Expect the full match plus six capture groups.
			if len(d) != 7 {
				a.Fatal("Unable to process netperf result")
			}
			for j, key := range keys {
				v, err := strconv.ParseFloat(d[j+1], 64)
				if err != nil {
					a.Fatal("Unable to parse netperf result")
				}
				latency[key] = append(latency[key], v)
			}
		}

		// Collapse each per-sample series to its average, keeping the
		// single-element-slice shape the reporter expects.
		for _, key := range keys {
			latency[key] = []float64{listAvg(latency[key])}
		}

		res = check.PerfResult{
			Scenario: scenarioName,
			Samples:  samples,
			Metric:   metric,
			Duration: duration,
			Latency:  latency,
		}
	} else {
		metric = "OP/s"
		if strings.Contains(test, "STREAM") {
			metric = "Mb/s"
		}

		exec := buildExecCommand(test, sip, duration, []string{})
		values := []float64{}
		// Output columns: recv socketsize, send socketsize,
		// msg size|okmsg, duration, value.
		for i := 0; i < samples; i++ {
			a.ExecInPod(ctx, exec)
			d := netPerfRegex.FindStringSubmatch(a.CmdOutput())
			if len(d) < 5 {
				a.Fatal("Unable to process netperf result")
			}
			// The value is in the last column; some tests leave it
			// empty and report it in the second-to-last instead.
			nv := d[len(d)-1]
			if nv == "" {
				nv = d[len(d)-2]
			}
			f, err := strconv.ParseFloat(nv, 64)
			if err != nil {
				a.Fatal("Unable to parse netperf result")
			}
			values = append(values, f)
		}

		res = check.PerfResult{
			Scenario: scenarioName,
			Metric:   metric,
			Duration: duration,
			Values:   values,
			Samples:  samples,
			Avg:      listAvg(values),
		}
	}

	result[k] = res
}

Expand Down
1 change: 1 addition & 0 deletions internal/cli/cmd/connectivity.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ func newCmdConnectivityTest(hooks Hooks) *cobra.Command {
cmd.Flags().IntVar(&params.PerfSamples, "perf-samples", 1, "Number of Performance samples to capture (how many times to run each test)")
cmd.Flags().BoolVar(&params.PerfCRR, "perf-crr", false, "Run Netperf CRR Test. --perf-samples and --perf-duration ignored")
cmd.Flags().BoolVar(&params.PerfHostNet, "host-net", false, "Use host networking during network performance tests")
cmd.Flags().BoolVar(&params.PerfLatency, "perf-latency", false, "Run network latency tests")

cmd.Flags().StringVar(&params.CurlImage, "curl-image", defaults.ConnectivityCheckAlpineCurlImage, "Image path to use for curl")
cmd.Flags().StringVar(&params.PerformanceImage, "performance-image", defaults.ConnectivityPerformanceImage, "Image path to use for performance")
Expand Down

0 comments on commit d074005

Please sign in to comment.