Review fixes
Signed-off-by: Marcel Zieba <[email protected]>
marseel committed Dec 12, 2023
1 parent 9248a9a commit 76325d1
Showing 4 changed files with 76 additions and 51 deletions.
2 changes: 2 additions & 0 deletions connectivity/check/check.go
@@ -41,6 +41,8 @@ type Parameters struct {
 	Timestamp        bool
 	PauseOnFail      bool
 	SkipIPCacheCheck bool
+	// Perf is not a user-facing parameter; it is used to run the perf
+	// subcommand via the connectivity test suite.
 	Perf             bool
 	PerfReportDir    string
 	PerfDuration     time.Duration
35 changes: 20 additions & 15 deletions connectivity/perf/benchmarks/netperf/perfpod.go
@@ -16,8 +16,8 @@ import (
 )
 
 const (
-	messageSize = 1024
-	tool        = "netperf"
+	messageSize     = 1024
+	netperfToolName = "netperf"
 )
 
 // Network Performance
@@ -33,15 +33,23 @@ type netPerf struct {
 
 func (s *netPerf) Name() string {
 	if s.name == "" {
-		return tool
+		return netperfToolName
 	}
-	return fmt.Sprintf("%s:%s", tool, s.name)
+	return fmt.Sprintf("%s:%s", netperfToolName, s.name)
 }

 func (s *netPerf) Run(ctx context.Context, t *check.Test) {
 	samples := t.Context().Params().PerfSamples
 	duration := t.Context().Params().PerfDuration
 
+	tests := []string{
+		"TCP_RR",
+		"TCP_STREAM",
+		"UDP_RR",
+		"UDP_STREAM",
+		"TCP_CRR",
+	}
+
 	for sample := 1; sample <= samples; sample++ {
 		for _, c := range t.Context().PerfClientPods() {
 			c := c
@@ -63,29 +71,20 @@ func (s *netPerf) Run(ctx context.Context, t *check.Test) {
 				sameNode = false
 			}
 
-			tests := []string{
-				"TCP_RR",
-				"TCP_STREAM",
-				"UDP_RR",
-				"UDP_STREAM",
-				"TCP_CRR",
-			}
-
 			for _, test := range tests {
-				action := t.NewAction(s, tool, &c, server, features.IPFamilyV4)
+				action := t.NewAction(s, netperfToolName, &c, server, features.IPFamilyV4)
 				action.CollectFlows = false
 				action.Run(func(a *check.Action) {
 					k := common.PerfTests{
 						Test:     test,
-						Tool:     tool,
+						Tool:     netperfToolName,
 						SameNode: sameNode,
 						Sample:   sample,
 						Duration: duration,
 						Scenario: scenarioName,
 					}
 					perfResult := netperf(ctx, server.Pod.Status.PodIP, k, a)
 					t.Context().PerfResults = append(t.Context().PerfResults, common.PerfSummary{PerfTest: k, Result: perfResult})
-
 				})
 			}
 		}
@@ -124,8 +123,14 @@ func netperf(ctx context.Context, sip string, perfTest common.PerfTests, a *chec
 	output := a.CmdOutput()
 	a.Debugf("Netperf output: ", output)
 	lines := strings.Split(output, "\n")
+	if len(lines) < 2 {
+		a.Fatal("Unable to process netperf result")
+	}
 	resultsLine := lines[len(lines)-2]
 	values := strings.Split(resultsLine, ",")
+	if len(values) != 9 {
+		a.Fatalf("Unable to process netperf result")
+	}
 	a.Debugf("Numbers: %v", values)
 
 	res := common.PerfResult{
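
Aside: the two guards added above assume netperf's output carries the data on the second-to-last line as exactly nine comma-separated fields. A minimal standalone sketch of that validation logic follows; the sample output line is hypothetical, not real netperf output:

package main

import (
    "fmt"
    "strings"
)

// parseNetperfValues mirrors the checks added in the diff: take the
// second-to-last line of the output and expect nine CSV fields.
func parseNetperfValues(output string) ([]string, error) {
    lines := strings.Split(output, "\n")
    if len(lines) < 2 {
        return nil, fmt.Errorf("unable to process netperf result: output too short")
    }
    values := strings.Split(lines[len(lines)-2], ",")
    if len(values) != 9 {
        return nil, fmt.Errorf("unable to process netperf result: got %d fields, want 9", len(values))
    }
    return values, nil
}

func main() {
    // Hypothetical output shaped the way the parser expects.
    out := "HEADER LINE\n1,2,3,4,5,6,7,8,9\n"
    values, err := parseNetperfValues(out)
    fmt.Println(values, err)
}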
69 changes: 45 additions & 24 deletions connectivity/perf/common/metrics.go
@@ -7,12 +7,14 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"maps"
 	"os"
 	"path"
 	"strings"
 	"time"
 )
 
+// LatencyMetric captures the latency metrics of a network performance test.
 type LatencyMetric struct {
 	Min time.Duration `json:"Min"`
 	Avg time.Duration `json:"Avg"`
@@ -22,8 +24,13 @@ type LatencyMetric struct {
 	Perc99 time.Duration `json:"Perc99"`
 }

-func (metric *LatencyMetric) ToPerfData(labels map[string]string) DataItem {
-	return DataItem{
+// toPerfData exports LatencyMetric in a format compatible with the perfdash schema.
+func (metric *LatencyMetric) toPerfData(labels map[string]string) dataItem {
+	resLabels := map[string]string{
+		"metric": "Latency",
+	}
+	maps.Copy(resLabels, labels)
+	return dataItem{
 		Data: map[string]float64{
 			// Let's only export percentiles
 			// Max is skewing results and doesn't make much sense to keep track of
@@ -32,45 +39,59 @@ func (metric *LatencyMetric) ToPerfData(labels map[string]string) DataItem {
 			"Perc99": float64(metric.Perc99) / float64(time.Microsecond),
 		},
 		Unit:   "us",
-		Labels: labels,
+		Labels: resLabels,
 	}
 }
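
For readers new to the maps package (Go 1.21+): maps.Copy copies the source map's entries into the destination, overwriting on key collision, which is how the per-test labels are merged on top of the base "metric" label in toPerfData above. A tiny self-contained sketch, with illustrative label values:

package main

import (
    "fmt"
    "maps"
)

func main() {
    // Base labels set by toPerfData before the merge.
    resLabels := map[string]string{"metric": "Latency"}
    // Per-test labels, as produced by getLabelsForTest (values illustrative).
    labels := map[string]string{"scenario": "pod-to-pod", "node": "same-node"}
    maps.Copy(resLabels, labels) // copies labels into resLabels, overwriting duplicates
    fmt.Println(resLabels)       // map[metric:Latency node:same-node scenario:pod-to-pod]
}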

+// TransactionRateMetric captures the transaction rate metric of a network performance test.
 type TransactionRateMetric struct {
 	TransactionRate float64 `json:"Rate"` // Ops per second
 }
 
-func (metric *TransactionRateMetric) ToPerfData(labels map[string]string) DataItem {
-	return DataItem{
+// toPerfData exports TransactionRateMetric in a format compatible with the perfdash schema.
+func (metric *TransactionRateMetric) toPerfData(labels map[string]string) dataItem {
+	resLabels := map[string]string{
+		"metric": "TransactionRate",
+	}
+	maps.Copy(resLabels, labels)
+	return dataItem{
 		Data: map[string]float64{
 			"Throughput": metric.TransactionRate,
 		},
 		Unit:   "ops/s",
-		Labels: labels,
+		Labels: resLabels,
 	}
 }

+// ThroughputMetric captures the throughput metric of a network performance test.
 type ThroughputMetric struct {
 	Throughput float64 `json:"Throughput"` // Throughput in bytes/s
 }
 
-func (metric *ThroughputMetric) ToPerfData(labels map[string]string) DataItem {
-	return DataItem{
+// toPerfData exports ThroughputMetric in a format compatible with the perfdash schema.
+func (metric *ThroughputMetric) toPerfData(labels map[string]string) dataItem {
+	resLabels := map[string]string{
+		"metric": "Throughput",
+	}
+	maps.Copy(resLabels, labels)
+	return dataItem{
 		Data: map[string]float64{
 			"Throughput": metric.Throughput / 1000000,
 		},
 		Unit:   "Mb/s",
-		Labels: labels,
+		Labels: resLabels,
 	}
 }

+// PerfResult stores the results of a single network performance test.
 type PerfResult struct {
 	Timestamp             time.Time
 	Latency               *LatencyMetric
 	TransactionRateMetric *TransactionRateMetric
 	ThroughputMetric      *ThroughputMetric
 }
 
+// PerfTests stores metadata about a performed test.
 type PerfTests struct {
 	Tool string
 	Test string
@@ -80,6 +101,7 @@ type PerfTests struct {
 	Duration time.Duration
 }
 
+// PerfSummary combines the metadata and the results of a test.
 type PerfSummary struct {
 	PerfTest PerfTests
 	Result   PerfResult
@@ -88,7 +110,7 @@ type PerfSummary struct {
 // These two structures are borrowed from kubernetes/perf-tests:
 // https://github.com/kubernetes/perf-tests/blob/master/clusterloader2/pkg/measurement/util/perftype.go
 // this is done in order to be compatible with perfdash
-type DataItem struct {
+type dataItem struct {
 	// Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice
 	// that all data items with the same label combination should have the same buckets.
 	Data map[string]float64 `json:"data"`
@@ -100,16 +122,16 @@ type DataItem struct {
 }
 
 // PerfData contains all data items generated in current test.
-type PerfData struct {
+type perfData struct {
 	// Version is the version of the metrics. The metrics consumer could use the version
 	// to detect metrics version change and decide what version to support.
 	Version   string     `json:"version"`
-	DataItems []DataItem `json:"dataItems"`
+	DataItems []dataItem `json:"dataItems"`
 	// Labels is the labels of the dataset.
 	Labels map[string]string `json:"labels,omitempty"`
 }

-func getLabelsForTest(summary PerfSummary, metric string) map[string]string {
+func getLabelsForTest(summary PerfSummary) map[string]string {
 	node := "other-node"
 	if summary.PerfTest.SameNode {
 		node = "same-node"
@@ -118,30 +140,29 @@ func getLabelsForTest(summary PerfSummary, metric string) map[string]string {
 		"scenario":  summary.PerfTest.Scenario,
 		"node":      node,
 		"test_type": summary.PerfTest.Tool + "-" + summary.PerfTest.Test,
-		"metric":    metric,
 	}
 }

-func ExportPerfSummaries(summaries []PerfSummary, reporitDir string) {
-	perfData := []DataItem{}
+// ExportPerfSummaries exports PerfSummaries in a format compatible with perfdash
+// and saves the results in the reportDir directory.
+func ExportPerfSummaries(summaries []PerfSummary, reportDir string) error {
+	data := []dataItem{}
 	for _, summary := range summaries {
+		labels := getLabelsForTest(summary)
 		if summary.Result.Latency != nil {
-			labels := getLabelsForTest(summary, "Latency")
-			perfData = append(perfData, summary.Result.Latency.ToPerfData(labels))
+			data = append(data, summary.Result.Latency.toPerfData(labels))
 		}
 		if summary.Result.TransactionRateMetric != nil {
-			labels := getLabelsForTest(summary, "TransactionRate")
-			perfData = append(perfData, summary.Result.TransactionRateMetric.ToPerfData(labels))
+			data = append(data, summary.Result.TransactionRateMetric.toPerfData(labels))
 		}
 		if summary.Result.ThroughputMetric != nil {
-			labels := getLabelsForTest(summary, "Throughput")
-			perfData = append(perfData, summary.Result.ThroughputMetric.ToPerfData(labels))
+			data = append(data, summary.Result.ThroughputMetric.toPerfData(labels))
 		}
 	}
-	exportSummary(PerfData{Version: "v1", DataItems: perfData}, reporitDir)
+	return exportSummary(perfData{Version: "v1", DataItems: data}, reportDir)
 }
 
-func exportSummary(content PerfData, reportDir string) error {
+func exportSummary(content perfData, reportDir string) error {
 	// this filename needs to be in a specific format for perfdash
 	fileName := strings.Join([]string{"NetworkPerformance_benchmark", time.Now().Format(time.RFC3339)}, "_")
 	filePath := path.Join(reportDir, strings.Join([]string{fileName, "json"}, "."))
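
To make the perfdash file format concrete, here is a sketch that mirrors the unexported types from the diff above and prints the JSON shape exportSummary writes; all metric values and label values are illustrative:

package main

import (
    "encoding/json"
    "fmt"
)

// Local copies of the unexported types shown in the diff.
type dataItem struct {
    Data   map[string]float64 `json:"data"`
    Unit   string             `json:"unit"`
    Labels map[string]string  `json:"labels,omitempty"`
}

type perfData struct {
    Version   string            `json:"version"`
    DataItems []dataItem        `json:"dataItems"`
    Labels    map[string]string `json:"labels,omitempty"`
}

func main() {
    report := perfData{
        Version: "v1",
        DataItems: []dataItem{{
            // Illustrative latency percentiles in microseconds.
            Data:   map[string]float64{"Perc50": 112, "Perc90": 187, "Perc99": 254},
            Unit:   "us",
            Labels: map[string]string{"metric": "Latency", "node": "same-node", "scenario": "pod-to-pod", "test_type": "netperf-TCP_RR"},
        }},
    }
    out, _ := json.MarshalIndent(report, "", "  ")
    fmt.Println(string(out))
}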
21 changes: 9 additions & 12 deletions internal/cli/cmd/connectivity.go
@@ -115,17 +115,6 @@ func RunE(hooks Hooks) func(cmd *cobra.Command, args []string) error {
 	}
 }
 
-func RunEPerf(hooks Hooks) func(cmd *cobra.Command, args []string) error {
-	return func(cmd *cobra.Command, args []string) error {
-		// This is ugly hack that allows us to override default values
-		// of these parameters that are not visible in perf subcommand options
-		// as we can't have different defaults specified in test and perf subcommand
-		params.Perf = true
-		params.ForceDeploy = true
-		return RunE(hooks)(cmd, args)
-	}
-}
-
 func newCmdConnectivityTest(hooks Hooks) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "test",
@@ -213,7 +202,15 @@ func newCmdConnectivityPerf(hooks Hooks) *cobra.Command {
 		Use:   "perf",
 		Short: "Test network performance",
 		Long:  ``,
-		RunE:  RunEPerf(hooks),
+		PreRun: func(cmd *cobra.Command, args []string) {
+			// This is a bit of a hack that lets us override the default values
+			// of parameters that are not visible in the perf subcommand options:
+			// we can't have different defaults specified in the test and perf
+			// subcommands, and both commands share the same RunE for now.
+			params.Perf = true
+			params.ForceDeploy = true
+		},
+		RunE: RunE(hooks),
 	}
 
 	cmd.Flags().DurationVar(&params.PerfDuration, "duration", 10*time.Second, "Duration for the Performance test to run")
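
The refactor works because cobra runs a command's PreRun hook before its RunE, so the parameter overrides are in place before the shared runner reads them. A minimal sketch of that ordering (not the cilium-cli wiring itself):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    var perf bool
    cmd := &cobra.Command{
        Use: "perf",
        PreRun: func(cmd *cobra.Command, args []string) {
            perf = true // overrides land here, before RunE executes
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("perf =", perf) // prints "perf = true"
            return nil
        },
    }
    _ = cmd.Execute()
}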
