From 496891f7b817b9ee20ade335ee9a0399efb38ebb Mon Sep 17 00:00:00 2001 From: Bojan Date: Fri, 3 Sep 2021 21:23:54 -0300 Subject: [PATCH] Prometheus output format (#308) * initial wip on prometheus output format * wip on prometheus format output * fixing printer tests * wip on prometheus tests * prometheus output docs * fix prometheus format tests * update options CLI documents --- cmd/ghz/main.go | 2 +- go.mod | 2 + go.sum | 3 + printer/influx.go | 203 +++++++++++++ printer/{printer_test.go => influx_test.go} | 9 +- printer/printer.go | 196 +------------ printer/prometheus.go | 299 ++++++++++++++++++++ printer/prometheus_test.go | 229 +++++++++++++++ runner/reporter.go | 41 ++- testdata/hello.proto | 20 ++ www/docs/options.md | 1 + www/docs/output.md | 5 + www/website/static/prometheus.txt | 39 +++ 13 files changed, 831 insertions(+), 218 deletions(-) create mode 100644 printer/influx.go rename printer/{printer_test.go => influx_test.go} (96%) create mode 100644 printer/prometheus.go create mode 100644 printer/prometheus_test.go create mode 100644 testdata/hello.proto create mode 100644 www/website/static/prometheus.txt diff --git a/cmd/ghz/main.go b/cmd/ghz/main.go index cb6b11ee..6e9236ca 100644 --- a/cmd/ghz/main.go +++ b/cmd/ghz/main.go @@ -208,7 +208,7 @@ var ( isFormatSet = false format = kingpin.Flag("format", "Output format. One of: summary, csv, json, pretty, html, influx-summary, influx-details. Default is summary."). - Short('O').Default("summary").PlaceHolder(" ").IsSetByUser(&isFormatSet).Enum("summary", "csv", "json", "pretty", "html", "influx-summary", "influx-details") + Short('O').Default("summary").PlaceHolder(" ").IsSetByUser(&isFormatSet).Enum("summary", "csv", "json", "pretty", "html", "influx-summary", "influx-details", "prometheus") isSkipFirstSet = false skipFirst = kingpin.Flag("skipFirst", "Skip the first X requests when doing the results tally.").
diff --git a/go.mod b/go.mod index 7a439eff..92ffb589 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,8 @@ require ( github.com/leodido/go-urn v1.2.0 // indirect github.com/mfridman/tparse v0.8.3 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 + github.com/prometheus/common v0.4.0 github.com/rakyll/statik v0.1.6 github.com/stretchr/testify v1.6.1 go.uber.org/multierr v1.3.0 diff --git a/go.sum b/go.sum index 511981a3..bf980397 100644 --- a/go.sum +++ b/go.sum @@ -316,6 +316,7 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mfridman/tparse v0.8.3 h1:DnjEnBXdlUJPo8ShfNPasu7m52iI1ETiST5RvS6b0c4= github.com/mfridman/tparse v0.8.3/go.mod h1:LzZWLkqcQrOfgvqZn7LOSBzgZwWnqI5NQsfgQVOT1o8= @@ -378,9 +379,11 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= diff --git a/printer/influx.go b/printer/influx.go new file mode 100644 index 00000000..74d1ab3a --- /dev/null +++ b/printer/influx.go @@ -0,0 +1,203 @@ +package printer + +import ( + "encoding/json" + "fmt" + "strings" +) + +func (rp *ReportPrinter) printInfluxLine() error { + measurement := "ghz_run" + tags := rp.getInfluxTags(true) + fields := rp.getInfluxFields() + timestamp := rp.Report.Date.UnixNano() + if timestamp < 0 { + timestamp = 0 + } + + if _, err := fmt.Fprintf(rp.Out, "%v,%v %v %v", measurement, tags, fields, timestamp); err != nil { + return err + } + + return nil +} + +func (rp *ReportPrinter) printInfluxDetails() error { + measurement := "ghz_detail" + commonTags := rp.getInfluxTags(false) + + for 
_, v := range rp.Report.Details { + values := make([]string, 3) + values[0] = fmt.Sprintf("latency=%v", v.Latency.Nanoseconds()) + values[1] = fmt.Sprintf(`error="%v"`, cleanInfluxString(v.Error)) + values[2] = fmt.Sprintf(`status="%v"`, v.Status) + + tags := commonTags + + if v.Error != "" { + tags = tags + ",hasError=true" + } else { + tags = tags + ",hasError=false" + } + + timestamp := v.Timestamp.UnixNano() + + fields := strings.Join(values, ",") + + if _, err := fmt.Fprintf(rp.Out, "%v,%v %v %v\n", measurement, tags, fields, timestamp); err != nil { + return err + } + } + return nil +} + +func (rp *ReportPrinter) getInfluxTags(addErrors bool) string { + s := make([]string, 0, 10) + + if rp.Report.Name != "" { + s = append(s, fmt.Sprintf(`name="%v"`, cleanInfluxString(strings.TrimSpace(rp.Report.Name)))) + } + + options := rp.Report.Options + + if options.Proto != "" { + s = append(s, fmt.Sprintf(`proto="%v"`, options.Proto)) + } else if options.Protoset != "" { + s = append(s, fmt.Sprintf(`Protoset="%v"`, options.Protoset)) + } + + s = append(s, fmt.Sprintf(`call="%v"`, options.Call)) + s = append(s, fmt.Sprintf(`host="%v"`, options.Host)) + s = append(s, fmt.Sprintf("n=%v", options.Total)) + + if options.CSchedule == "const" { + s = append(s, fmt.Sprintf("c=%v", options.Concurrency)) + } else { + s = append(s, fmt.Sprintf("concurrency-schedule=%v", options.CSchedule)) + s = append(s, fmt.Sprintf("concurrency-start=%v", options.CStart)) + s = append(s, fmt.Sprintf("concurrency-end=%v", options.CEnd)) + s = append(s, fmt.Sprintf("concurrency-step=%v", options.CStep)) + s = append(s, fmt.Sprintf("concurrency-step-duration=%v", options.CStepDuration)) + s = append(s, fmt.Sprintf("concurrency-max-duration=%v", options.CMaxDuration)) + } + + if options.LoadSchedule == "const" { + s = append(s, fmt.Sprintf("rps=%v", options.RPS)) + } else { + s = append(s, fmt.Sprintf("load-schedule=%v", options.LoadSchedule)) + s = append(s, fmt.Sprintf("load-start=%v", options.LoadStart)) + s = append(s, fmt.Sprintf("load-end=%v", options.LoadEnd)) + s = append(s, fmt.Sprintf("load-step=%v", options.LoadStep)) + s = append(s, fmt.Sprintf("load-step-duration=%v", options.LoadStepDuration)) + s = append(s, fmt.Sprintf("load-max-duration=%v", options.LoadMaxDuration)) + } + + s = append(s, fmt.Sprintf("z=%v", options.Duration.Nanoseconds())) + s = append(s, fmt.Sprintf("timeout=%v", options.Timeout.Seconds())) + s = append(s, fmt.Sprintf("dial_timeout=%v", options.DialTimeout.Seconds())) + s = append(s, fmt.Sprintf("keepalive=%v", options.KeepaliveTime.Seconds())) + + dataStr := `""` + dataBytes, err := json.Marshal(options.Data) + if err == nil && len(dataBytes) > 0 { + dataBytes, err = json.Marshal(string(dataBytes)) + if err == nil { + dataStr = string(dataBytes) + } + } + + dataStr = cleanInfluxString(dataStr) + + s = append(s, fmt.Sprintf("data=%s", dataStr)) + + mdStr := `""` + if options.Metadata != nil { + mdBytes, err := json.Marshal(options.Metadata) + if err == nil { + mdBytes, err = json.Marshal(string(mdBytes)) + if err == nil { + mdStr = string(mdBytes) + } + } + + mdStr = cleanInfluxString(mdStr) + } + + s = append(s, fmt.Sprintf("metadata=%s", mdStr)) + + callTagsStr := `""` + if len(rp.Report.Tags) > 0 { + callTagsBytes, err := json.Marshal(rp.Report.Tags) + if err == nil { + callTagsBytes, err = json.Marshal(string(callTagsBytes)) + if err == nil { + callTagsStr = string(callTagsBytes) + } + } + + callTagsStr = cleanInfluxString(callTagsStr) + } + + s = append(s, fmt.Sprintf("tags=%s", 
callTagsStr)) + + if addErrors { + errCount := 0 + if len(rp.Report.ErrorDist) > 0 { + for _, v := range rp.Report.ErrorDist { + errCount += v + } + } + + s = append(s, fmt.Sprintf("errors=%v", errCount)) + + hasErrors := false + if errCount > 0 { + hasErrors = true + } + + s = append(s, fmt.Sprintf("has_errors=%v", hasErrors)) + } + + return strings.Join(s, ",") +} + +func (rp *ReportPrinter) getInfluxFields() string { + s := make([]string, 0, 5) + + s = append(s, fmt.Sprintf("count=%v", rp.Report.Count)) + s = append(s, fmt.Sprintf("total=%v", rp.Report.Total.Nanoseconds())) + s = append(s, fmt.Sprintf("average=%v", rp.Report.Average.Nanoseconds())) + s = append(s, fmt.Sprintf("fastest=%v", rp.Report.Fastest.Nanoseconds())) + s = append(s, fmt.Sprintf("slowest=%v", rp.Report.Slowest.Nanoseconds())) + s = append(s, fmt.Sprintf("rps=%4.2f", rp.Report.Rps)) + + if len(rp.Report.LatencyDistribution) > 0 { + for _, v := range rp.Report.LatencyDistribution { + if v.Percentage == 50 { + s = append(s, fmt.Sprintf("median=%v", v.Latency.Nanoseconds())) + } + + if v.Percentage == 95 { + s = append(s, fmt.Sprintf("p95=%v", v.Latency.Nanoseconds())) + } + } + } + + errCount := 0 + if len(rp.Report.ErrorDist) > 0 { + for _, v := range rp.Report.ErrorDist { + errCount += v + } + } + + s = append(s, fmt.Sprintf("errors=%v", errCount)) + + return strings.Join(s, ",") +} + +func cleanInfluxString(input string) string { + input = strings.Replace(input, " ", "\\ ", -1) + input = strings.Replace(input, ",", "\\,", -1) + input = strings.Replace(input, "=", "\\=", -1) + return input +} diff --git a/printer/printer_test.go b/printer/influx_test.go similarity index 96% rename from printer/printer_test.go rename to printer/influx_test.go index 8f23b6a9..1526d658 100644 --- a/printer/printer_test.go +++ b/printer/influx_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestPrinter_getInfluxLine(t *testing.T) { +func TestPrinter_printInfluxLine(t *testing.T) { date := time.Now() unixTimeNow := date.UnixNano() @@ -119,8 +119,11 @@ func TestPrinter_getInfluxLine(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - p := ReportPrinter{Report: &tt.report} - actual := p.getInfluxLine() + buf := bytes.NewBufferString("") + p := ReportPrinter{Report: &tt.report, Out: buf} + err := p.printInfluxLine() + assert.NoError(t, err) + actual := buf.String() assert.Equal(t, tt.expected, actual) }) } diff --git a/printer/printer.go b/printer/printer.go index c4ba00e6..1219a3f2 100644 --- a/printer/printer.go +++ b/printer/printer.go @@ -77,199 +77,16 @@ func (rp *ReportPrinter) Print(format string) error { } return rp.print(buf.String()) case "influx-summary": - return rp.print(rp.getInfluxLine()) + return rp.printInfluxLine() case "influx-details": return rp.printInfluxDetails() + case "prometheus": + return rp.printPrometheus() default: return fmt.Errorf("unknown format: %s", format) } } -func (rp *ReportPrinter) getInfluxLine() string { - measurement := "ghz_run" - tags := rp.getInfluxTags(true) - fields := rp.getInfluxFields() - timestamp := rp.Report.Date.UnixNano() - if timestamp < 0 { - timestamp = 0 - } - - return fmt.Sprintf("%v,%v %v %v", measurement, tags, fields, timestamp) -} - -func (rp *ReportPrinter) printInfluxDetails() error { - measurement := "ghz_detail" - commonTags := rp.getInfluxTags(false) - - for _, v := range rp.Report.Details { - values := make([]string, 3) - values[0] = fmt.Sprintf("latency=%v", v.Latency.Nanoseconds()) - values[1] = 
fmt.Sprintf(`error="%v"`, cleanInfluxString(v.Error)) - values[2] = fmt.Sprintf(`status="%v"`, v.Status) - - tags := commonTags - - if v.Error != "" { - tags = tags + ",hasError=true" - } else { - tags = tags + ",hasError=false" - } - - timestamp := v.Timestamp.UnixNano() - - fields := strings.Join(values, ",") - - if _, err := fmt.Fprintf(rp.Out, "%v,%v %v %v\n", measurement, tags, fields, timestamp); err != nil { - return err - } - } - return nil -} - -func (rp *ReportPrinter) getInfluxTags(addErrors bool) string { - s := make([]string, 0, 10) - - if rp.Report.Name != "" { - s = append(s, fmt.Sprintf(`name="%v"`, cleanInfluxString(strings.TrimSpace(rp.Report.Name)))) - } - - options := rp.Report.Options - - if options.Proto != "" { - s = append(s, fmt.Sprintf(`proto="%v"`, options.Proto)) - } else if options.Protoset != "" { - s = append(s, fmt.Sprintf(`Protoset="%v"`, options.Protoset)) - } - - s = append(s, fmt.Sprintf(`call="%v"`, options.Call)) - s = append(s, fmt.Sprintf(`host="%v"`, options.Host)) - s = append(s, fmt.Sprintf("n=%v", options.Total)) - - if options.CSchedule == "const" { - s = append(s, fmt.Sprintf("c=%v", options.Concurrency)) - } else { - s = append(s, fmt.Sprintf("concurrency-schedule=%v", options.CSchedule)) - s = append(s, fmt.Sprintf("concurrency-start=%v", options.CStart)) - s = append(s, fmt.Sprintf("concurrency-end=%v", options.CEnd)) - s = append(s, fmt.Sprintf("concurrency-step=%v", options.CStep)) - s = append(s, fmt.Sprintf("concurrency-step-duration=%v", options.CStepDuration)) - s = append(s, fmt.Sprintf("concurrency-max-duration=%v", options.CMaxDuration)) - } - - if options.LoadSchedule == "const" { - s = append(s, fmt.Sprintf("rps=%v", options.RPS)) - } else { - s = append(s, fmt.Sprintf("load-schedule=%v", options.LoadSchedule)) - s = append(s, fmt.Sprintf("load-start=%v", options.LoadStart)) - s = append(s, fmt.Sprintf("load-end=%v", options.LoadEnd)) - s = append(s, fmt.Sprintf("load-step=%v", options.LoadStep)) - s = append(s, fmt.Sprintf("load-step-duration=%v", options.LoadStepDuration)) - s = append(s, fmt.Sprintf("load-max-duration=%v", options.LoadMaxDuration)) - } - - s = append(s, fmt.Sprintf("z=%v", options.Duration.Nanoseconds())) - s = append(s, fmt.Sprintf("timeout=%v", options.Timeout.Seconds())) - s = append(s, fmt.Sprintf("dial_timeout=%v", options.DialTimeout.Seconds())) - s = append(s, fmt.Sprintf("keepalive=%v", options.KeepaliveTime.Seconds())) - - dataStr := `""` - dataBytes, err := json.Marshal(options.Data) - if err == nil && len(dataBytes) > 0 { - dataBytes, err = json.Marshal(string(dataBytes)) - if err == nil { - dataStr = string(dataBytes) - } - } - - dataStr = cleanInfluxString(dataStr) - - s = append(s, fmt.Sprintf("data=%s", dataStr)) - - mdStr := `""` - if options.Metadata != nil { - mdBytes, err := json.Marshal(options.Metadata) - if err == nil { - mdBytes, err = json.Marshal(string(mdBytes)) - if err == nil { - mdStr = string(mdBytes) - } - } - - mdStr = cleanInfluxString(mdStr) - } - - s = append(s, fmt.Sprintf("metadata=%s", mdStr)) - - callTagsStr := `""` - if len(rp.Report.Tags) > 0 { - callTagsBytes, err := json.Marshal(rp.Report.Tags) - if err == nil { - callTagsBytes, err = json.Marshal(string(callTagsBytes)) - if err == nil { - callTagsStr = string(callTagsBytes) - } - } - - callTagsStr = cleanInfluxString(callTagsStr) - } - - s = append(s, fmt.Sprintf("tags=%s", callTagsStr)) - - if addErrors { - errCount := 0 - if len(rp.Report.ErrorDist) > 0 { - for _, v := range rp.Report.ErrorDist { - errCount += v - } 
- } - - s = append(s, fmt.Sprintf("errors=%v", errCount)) - - hasErrors := false - if errCount > 0 { - hasErrors = true - } - - s = append(s, fmt.Sprintf("has_errors=%v", hasErrors)) - } - - return strings.Join(s, ",") -} - -func (rp *ReportPrinter) getInfluxFields() string { - s := make([]string, 0, 5) - - s = append(s, fmt.Sprintf("count=%v", rp.Report.Count)) - s = append(s, fmt.Sprintf("total=%v", rp.Report.Total.Nanoseconds())) - s = append(s, fmt.Sprintf("average=%v", rp.Report.Average.Nanoseconds())) - s = append(s, fmt.Sprintf("fastest=%v", rp.Report.Fastest.Nanoseconds())) - s = append(s, fmt.Sprintf("slowest=%v", rp.Report.Slowest.Nanoseconds())) - s = append(s, fmt.Sprintf("rps=%4.2f", rp.Report.Rps)) - - if len(rp.Report.LatencyDistribution) > 0 { - for _, v := range rp.Report.LatencyDistribution { - if v.Percentage == 50 { - s = append(s, fmt.Sprintf("median=%v", v.Latency.Nanoseconds())) - } - - if v.Percentage == 95 { - s = append(s, fmt.Sprintf("p95=%v", v.Latency.Nanoseconds())) - } - } - } - - errCount := 0 - if len(rp.Report.ErrorDist) > 0 { - for _, v := range rp.Report.ErrorDist { - errCount += v - } - } - - s = append(s, fmt.Sprintf("errors=%v", errCount)) - - return strings.Join(s, ",") -} - func (rp *ReportPrinter) print(s string) error { _, err := fmt.Fprint(rp.Out, s) return err @@ -412,10 +229,3 @@ func formatErrorDist(errDist map[string]int) string { _ = w.Flush() return buf.String() } - -func cleanInfluxString(input string) string { - input = strings.Replace(input, " ", "\\ ", -1) - input = strings.Replace(input, ",", "\\,", -1) - input = strings.Replace(input, "=", "\\=", -1) - return input -} diff --git a/printer/prometheus.go b/printer/prometheus.go new file mode 100644 index 00000000..27264555 --- /dev/null +++ b/printer/prometheus.go @@ -0,0 +1,299 @@ +package printer + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/bojand/ghz/runner" + promtypes "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +// https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md + +func (rp *ReportPrinter) printPrometheus() error { + encoder := expfmt.NewEncoder(rp.Out, expfmt.FmtText) + + labels, err := rp.getCommonPrometheusLabels() + if err != nil { + return err + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_count", + &promtypes.Gauge{Value: ptrFloat64(float64(rp.Report.Count))}); err != nil { + return err + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_total", &promtypes.Gauge{Value: ptrFloat64(float64(rp.Report.Total))}); err != nil { + return err + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_average", &promtypes.Gauge{Value: ptrFloat64(float64(rp.Report.Average.Nanoseconds()))}); err != nil { + return err + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_fastest", &promtypes.Gauge{Value: ptrFloat64(float64(rp.Report.Fastest.Nanoseconds()))}); err != nil { + return err + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_slowest", &promtypes.Gauge{Value: ptrFloat64(float64(rp.Report.Slowest.Nanoseconds()))}); err != nil { + return err + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_rps", &promtypes.Gauge{Value: ptrFloat64(rp.Report.Rps)}); err != nil { + return err + } + + // histogram + latencyName := "ghz_run_histogram" + metricType := promtypes.MetricType_HISTOGRAM + 
mf := promtypes.MetricFamily{ + Name: &latencyName, + Type: &metricType, + } + + metrics := make([]*promtypes.Metric, 0, 1) + + metrics = append(metrics, &promtypes.Metric{ + Label: labels, + Histogram: &promtypes.Histogram{ + SampleCount: &rp.Report.Count, + SampleSum: ptrFloat64(float64(rp.Report.Total.Nanoseconds())), + Bucket: make([]*promtypes.Bucket, 0, len(rp.Report.Histogram)), + }, + }) + + mf.Metric = append(mf.Metric, metrics...) + + for _, v := range rp.Report.Histogram { + metrics[0].Histogram.Bucket = append(metrics[0].Histogram.Bucket, + &promtypes.Bucket{ + CumulativeCount: ptrUint64(uint64(v.Count)), + UpperBound: ptrFloat64(v.Mark), + }) + } + + if err := encoder.Encode(&mf); err != nil { + return err + } + + // latency distribution + latencyName = "ghz_run_latency" + metricType = promtypes.MetricType_SUMMARY + mf = promtypes.MetricFamily{ + Name: &latencyName, + Type: &metricType, + } + + metrics = make([]*promtypes.Metric, 0, 1) + + metrics = append(metrics, &promtypes.Metric{ + Label: labels, + Summary: &promtypes.Summary{ + SampleCount: &rp.Report.Count, + SampleSum: ptrFloat64(float64(rp.Report.Total.Nanoseconds())), + Quantile: make([]*promtypes.Quantile, 0, len(rp.Report.LatencyDistribution)), + }, + }) + + mf.Metric = append(mf.Metric, metrics...) + + for _, v := range rp.Report.LatencyDistribution { + metrics[0].Summary.Quantile = append(metrics[0].Summary.Quantile, + &promtypes.Quantile{ + Quantile: ptrFloat64(float64(v.Percentage) / 100.0), + Value: ptrFloat64(float64(v.Latency.Nanoseconds())), + }) + } + + if err := encoder.Encode(&mf); err != nil { + return err + } + + // errors + errCount := 0 + for _, v := range rp.Report.ErrorDist { + errCount += v + } + + if err := rp.printPrometheusMetricGauge( + encoder, labels, + "ghz_run_errors", &promtypes.Gauge{Value: ptrFloat64(float64(errCount))}); err != nil { + return err + } + + return nil +} + +func (rp *ReportPrinter) printPrometheusMetricGauge( + encoder expfmt.Encoder, labels []*promtypes.LabelPair, + name string, value *promtypes.Gauge) error { + metricType := promtypes.MetricType_GAUGE + mf := promtypes.MetricFamily{ + Name: &name, + Type: &metricType, + } + + metrics := make([]*promtypes.Metric, 0, 1) + + metrics = append(metrics, &promtypes.Metric{ + Label: labels, + Gauge: value, + }) + + mf.Metric = append(mf.Metric, metrics...) 
+ + return encoder.Encode(&mf) +} + +func (rp *ReportPrinter) getCommonPrometheusLabels() ([]*promtypes.LabelPair, error) { + labels := make([]*promtypes.LabelPair, 0, len(rp.Report.Tags)+5) + + labels = append(labels, + &promtypes.LabelPair{ + Name: ptrString("name"), + Value: &rp.Report.Name, + }, + &promtypes.LabelPair{ + Name: ptrString("end_reason"), + Value: ptrString(string(rp.Report.EndReason)), + }, + ) + + options := map[string]string{} + type Alias runner.Options + j, err := json.Marshal(&struct { + ImportPaths string `json:"import-paths"` + SkipTLS string `json:"skipTLS,omitempty"` + Insecure string `json:"insecure"` + Async string `json:"async,omitempty"` + Binary string `json:"binary"` + CountErrors string `json:"count-errors,omitempty"` + RPS string `json:"rps,omitempty"` + LoadStart string `json:"load-start"` + LoadEnd string `json:"load-end"` + LoadStep string `json:"load-step"` + Concurrency string `json:"concurrency,omitempty"` + CStart string `json:"concurrency-start"` + CEnd string `json:"concurrency-end"` + CStep string `json:"concurrency-step"` + Total string `json:"total,omitempty"` + Connections string `json:"connections,omitempty"` + CPUs string `json:"CPUs"` + SkipFirst string `json:"skipFirst,omitempty"` + Data string `json:"data,omitempty"` + Metadata string `json:"metadata,omitempty"` + LoadStepDuration string `json:"load-step-duration"` + LoadMaxDuration string `json:"load-max-duration"` + CStepDuration string `json:"concurrency-step-duration"` + CMaxDuration string `json:"concurrency-max-duration"` + Duration string `json:"duration,omitempty"` + Timeout string `json:"timeout,omitempty"` + DialTimeout string `json:"dial-timeout,omitempty"` + KeepaliveTime string `json:"keepalive,omitempty"` + *Alias + }{ + ImportPaths: strings.Join(rp.Report.Options.ImportPaths, ","), + SkipTLS: *ptrBoolToStr(rp.Report.Options.SkipTLS), + Insecure: *ptrBoolToStr(rp.Report.Options.Insecure), + Async: *ptrBoolToStr(rp.Report.Options.Async), + Binary: *ptrBoolToStr(rp.Report.Options.Binary), + CountErrors: *ptrBoolToStr(rp.Report.Options.CountErrors), + RPS: *ptrString(strconv.Itoa(rp.Report.Options.RPS)), + LoadStart: *ptrString(strconv.Itoa(rp.Report.Options.LoadStart)), + LoadEnd: *ptrString(strconv.Itoa(rp.Report.Options.LoadEnd)), + LoadStep: *ptrString(strconv.Itoa(rp.Report.Options.LoadStep)), + Concurrency: *ptrString(strconv.Itoa(rp.Report.Options.Concurrency)), + CStart: *ptrString(strconv.Itoa(rp.Report.Options.CStart)), + CEnd: *ptrString(strconv.Itoa(rp.Report.Options.CEnd)), + CStep: *ptrString(strconv.Itoa(rp.Report.Options.CStep)), + Total: *ptrString(strconv.Itoa(rp.Report.Options.Total)), + Connections: *ptrString(strconv.Itoa(rp.Report.Options.Connections)), + CPUs: *ptrString(strconv.Itoa(rp.Report.Options.CPUs)), + SkipFirst: *ptrString(strconv.Itoa(rp.Report.Options.SkipFirst)), + Data: "", + Metadata: "", + LoadStepDuration: *ptrString(strconv.Itoa(int(rp.Report.Options.LoadStepDuration.Nanoseconds()))), + LoadMaxDuration: *ptrString(strconv.Itoa(int(rp.Report.Options.LoadMaxDuration.Nanoseconds()))), + CStepDuration: *ptrString(strconv.Itoa(int(rp.Report.Options.CStepDuration.Nanoseconds()))), + CMaxDuration: *ptrString(strconv.Itoa(int(rp.Report.Options.CMaxDuration.Nanoseconds()))), + Duration: *ptrString(strconv.Itoa(int(rp.Report.Options.Duration.Nanoseconds()))), + Timeout: *ptrString(strconv.Itoa(int(rp.Report.Options.Timeout.Nanoseconds()))), + DialTimeout: *ptrString(strconv.Itoa(int(rp.Report.Options.DialTimeout.Nanoseconds()))), + KeepaliveTime: 
*ptrString(strconv.Itoa(int(rp.Report.Options.KeepaliveTime.Nanoseconds()))), + Alias: (*Alias)(&rp.Report.Options), + }) + if err != nil { + return nil, err + } + + err = json.Unmarshal(j, &options) + if err != nil { + return nil, err + } + + if rp.Report.Options.CSchedule == "const" { + delete(options, "concurrency-schedule") + delete(options, "concurrency-start") + delete(options, "concurrency-end") + delete(options, "concurrency-step") + delete(options, "concurrency-step-duration") + delete(options, "concurrency-max-duration") + } + + if rp.Report.Options.LoadSchedule == "const" { + delete(options, "load-schedule") + delete(options, "load-start") + delete(options, "load-end") + delete(options, "load-step") + delete(options, "load-step-duration") + delete(options, "load-max-duration") + } + + for k, v := range options { + k, v := k, v + + k = strings.Replace(k, "-", "_", -1) + + labels = append(labels, &promtypes.LabelPair{ + Name: &k, + Value: &v, + }) + } + + return labels, nil +} + +func ptrUint64(v uint64) *uint64 { + return &v +} + +func ptrFloat64(v float64) *float64 { + return &v +} + +func ptrString(v string) *string { + return &v +} + +func ptrBoolToStr(v bool) *string { + switch v { + case true: + return ptrString("true") + default: + return ptrString("false") + } +} diff --git a/printer/prometheus_test.go b/printer/prometheus_test.go new file mode 100644 index 00000000..ac0cf58e --- /dev/null +++ b/printer/prometheus_test.go @@ -0,0 +1,229 @@ +package printer + +import ( + "bytes" + "io" + "sort" + "testing" + "time" + + "github.com/bojand/ghz/runner" + promtypes "github.com/prometheus/client_model/go" + expfmt "github.com/prometheus/common/expfmt" + "github.com/stretchr/testify/assert" +) + +func TestPrinter_printPrometheus(t *testing.T) { + date := time.Now() + + var tests = []struct { + name string + report runner.Report + expected string + }{ + { + "basic", + runner.Report{ + Name: "run name", + EndReason: runner.ReasonNormalEnd, + Date: date, + Count: 200, + Total: time.Duration(2 * time.Second), + Average: time.Duration(10 * time.Millisecond), + Fastest: time.Duration(1 * time.Millisecond), + Slowest: time.Duration(100 * time.Millisecond), + Rps: 2000, + ErrorDist: map[string]int{ + "rpc error: code = Internal desc = Internal error.": 3, + "rpc error: code = DeadlineExceeded desc = Deadline exceeded.": 2}, + StatusCodeDist: map[string]int{ + "OK": 195, + "Internal": 3, + "DeadlineExceeded": 2}, + Options: runner.Options{ + Call: "helloworld.Greeter.SayHello", + Proto: "/apis/greeter.proto", + Host: "0.0.0.0:50051", + LoadSchedule: "const", + CSchedule: "const", + Total: 200, + Concurrency: 50, + Data: map[string]interface{}{ + "name": "Bob Smith", + }, + Metadata: &map[string]string{ + "foo bar": "biz baz", + }, + }, + LatencyDistribution: []runner.LatencyDistribution{ + { + Percentage: 25, + Latency: time.Duration(1 * time.Millisecond), + }, + { + Percentage: 50, + Latency: time.Duration(5 * time.Millisecond), + }, + { + Percentage: 75, + Latency: time.Duration(10 * time.Millisecond), + }, + { + Percentage: 90, + Latency: time.Duration(15 * time.Millisecond), + }, + { + Percentage: 95, + Latency: time.Duration(20 * time.Millisecond), + }, + { + Percentage: 99, + Latency: time.Duration(25 * time.Millisecond), + }}, + Histogram: []runner.Bucket{ + { + Mark: 0.01, + Count: 1, + Frequency: 0.005, + }, + { + Mark: 0.02, + Count: 10, + Frequency: 0.01, + }, + { + Mark: 0.03, + Count: 50, + Frequency: 0.1, + }, + { + Mark: 0.05, + Count: 60, + 
Frequency: 0.15, + }, + { + Mark: 0.1, + Count: 15, + Frequency: 0.07, + }, + }, + Details: []runner.ResultDetail{ + { + Timestamp: date, + Latency: time.Duration(1 * time.Millisecond), + Status: "OK", + }, + }, + }, + expectedProm, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := bytes.NewBufferString("") + p := ReportPrinter{Report: &tt.report, Out: buf} + err := p.printPrometheus() + assert.NoError(t, err) + actual := buf.String() + + // parse actual + var actualMetricFamilies []*promtypes.MetricFamily + r := bytes.NewReader([]byte(actual)) + decoder := expfmt.NewDecoder(r, expfmt.FmtText) + for { + metric := &promtypes.MetricFamily{} + err := decoder.Decode(metric) + if err != nil { + if err == io.EOF { + break + } + assert.NoError(t, err) + } + + actualMetricFamilies = append(actualMetricFamilies, metric) + } + + // parse expected + var expectedMetricFamilies []*promtypes.MetricFamily + r = bytes.NewReader([]byte(tt.expected)) + decoder = expfmt.NewDecoder(r, expfmt.FmtText) + for { + metric := &promtypes.MetricFamily{} + err := decoder.Decode(metric) + if err != nil { + if err == io.EOF { + break + } + assert.NoError(t, err) + } + + expectedMetricFamilies = append(expectedMetricFamilies, metric) + } + + for i, amf := range actualMetricFamilies { + amf := amf + + assert.True(t, i < len(expectedMetricFamilies)) + + emf := expectedMetricFamilies[i] + + for im, am := range amf.Metric { + am := am + + assert.True(t, im < len(emf.Metric)) + + em := emf.Metric[im] + + assert.NotNil(t, em) + + // sort actual labels + al := am.Label + sort.Slice(al, func(i, j int) bool { return *(al[i].Name) < *(al[j].Name) }) + + // sort expected labels + el := em.Label + sort.Slice(el, func(i, j int) bool { return *(el[i].Name) < *(el[j].Name) }) + + // finally compare labels + assert.Equal(t, el, al) + } + } + }) + } +} + +var expectedProm string = ` +# TYPE ghz_run_count gauge +ghz_run_count{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 200 +# TYPE ghz_run_total gauge +ghz_run_total{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 2e+09 +# TYPE ghz_run_average gauge +ghz_run_average{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 1e+07 +# TYPE ghz_run_fastest gauge +ghz_run_fastest{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 1e+06 +# TYPE ghz_run_slowest gauge 
+ghz_run_slowest{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 1e+08 +# TYPE ghz_run_rps gauge +ghz_run_rps{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 2000 +# TYPE ghz_run_histogram histogram +ghz_run_histogram_bucket{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",le="0.01"} 1 +ghz_run_histogram_bucket{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",le="0.02"} 10 +ghz_run_histogram_bucket{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",le="0.03"} 50 +ghz_run_histogram_bucket{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",le="0.05"} 60 +ghz_run_histogram_bucket{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",le="0.1"} 15 +ghz_run_histogram_bucket{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",le="+Inf"} 200 +ghz_run_histogram_sum{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 2e+09 
+ghz_run_histogram_count{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 200 +# TYPE ghz_run_latency summary +ghz_run_latency{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",quantile="0.25"} 1e+06 +ghz_run_latency{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",quantile="0.5"} 5e+06 +ghz_run_latency{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",quantile="0.75"} 1e+07 +ghz_run_latency{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",quantile="0.9"} 1.5e+07 +ghz_run_latency{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",quantile="0.95"} 2e+07 +ghz_run_latency{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0",quantile="0.99"} 2.5e+07 +ghz_run_latency_sum{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 2e+09 +ghz_run_latency_count{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 200 +# TYPE ghz_run_errors gauge 
+ghz_run_errors{name="run name",end_reason="normal",insecure="false",rps="0",connections="0",keepalive="0",skipFirst="0",dial_timeout="0",proto="/apis/greeter.proto",concurrency="50",call="helloworld.Greeter.SayHello",import_paths="",async="false",binary="false",total="200",host="0.0.0.0:50051",skipTLS="false",CPUs="0",timeout="0",count_errors="false",duration="0"} 5 +` diff --git a/runner/reporter.go b/runner/reporter.go index 9f3c8fd6..721cb083 100644 --- a/runner/reporter.go +++ b/runner/reporter.go @@ -40,26 +40,26 @@ type Options struct { Insecure bool `json:"insecure"` Authority string `json:"authority,omitempty"` - RPS uint `json:"rps,omitempty"` + RPS int `json:"rps,omitempty"` LoadSchedule string `json:"load-schedule"` - LoadStart uint `json:"load-start"` - LoadEnd uint `json:"load-end"` + LoadStart int `json:"load-start"` + LoadEnd int `json:"load-end"` LoadStep int `json:"load-step"` LoadStepDuration time.Duration `json:"load-step-duration"` LoadMaxDuration time.Duration `json:"load-max-duration"` - Concurrency uint `json:"concurrency,omitempty"` + Concurrency int `json:"concurrency,omitempty"` CSchedule string `json:"concurrency-schedule"` - CStart uint `json:"concurrency-start"` - CEnd uint `json:"concurrency-end"` + CStart int `json:"concurrency-start"` + CEnd int `json:"concurrency-end"` CStep int `json:"concurrency-step"` CStepDuration time.Duration `json:"concurrency-step-duration"` CMaxDuration time.Duration `json:"concurrency-max-duration"` - Total uint `json:"total,omitempty"` + Total int `json:"total,omitempty"` Async bool `json:"async,omitempty"` - Connections uint `json:"connections,omitempty"` + Connections int `json:"connections,omitempty"` Duration time.Duration `json:"duration,omitempty"` Timeout time.Duration `json:"timeout,omitempty"` DialTimeout time.Duration `json:"dial-timeout,omitempty"` @@ -72,7 +72,7 @@ type Options struct { CPUs int `json:"CPUs"` Name string `json:"name,omitempty"` - SkipFirst uint `json:"skipFirst,omitempty"` + SkipFirst int `json:"skipFirst,omitempty"` CountErrors bool `json:"count-errors,omitempty"` } @@ -80,9 +80,8 @@ type Options struct { type Report struct { Name string `json:"name,omitempty"` EndReason StopReason `json:"endReason,omitempty"` - - Options Options `json:"options,omitempty"` - Date time.Time `json:"date"` + Date time.Time `json:"date"` + Options Options `json:"options,omitempty"` Count uint64 `json:"count"` Total time.Duration `json:"total"` @@ -213,26 +212,26 @@ func (r *Reporter) Finalize(stopReason StopReason, total time.Duration) *Report Insecure: r.config.insecure, Authority: r.config.authority, - RPS: uint(r.config.rps), + RPS: r.config.rps, LoadSchedule: r.config.loadSchedule, - LoadStart: r.config.loadStart, - LoadEnd: r.config.loadEnd, + LoadStart: int(r.config.loadStart), + LoadEnd: int(r.config.loadEnd), LoadStep: r.config.loadStep, LoadStepDuration: r.config.loadStepDuration, LoadMaxDuration: r.config.loadDuration, - Concurrency: uint(r.config.c), + Concurrency: r.config.c, CSchedule: r.config.cSchedule, - CStart: r.config.cStart, - CEnd: r.config.cEnd, + CStart: int(r.config.cStart), + CEnd: int(r.config.cEnd), CStep: r.config.cStep, CStepDuration: r.config.cStepDuration, CMaxDuration: r.config.cMaxDuration, - Total: uint(r.config.n), + Total: r.config.n, Async: r.config.async, - Connections: uint(r.config.nConns), + Connections: r.config.nConns, Duration: r.config.z, Timeout: r.config.timeout, DialTimeout: r.config.dialTimeout, @@ -241,7 +240,7 @@ func (r *Reporter) Finalize(stopReason StopReason, total 
time.Duration) *Report Binary: r.config.binary, CPUs: r.config.cpus, Name: r.config.name, - SkipFirst: uint(r.config.skipFirst), + SkipFirst: r.config.skipFirst, CountErrors: r.config.countErrors, } diff --git a/testdata/hello.proto b/testdata/hello.proto new file mode 100644 index 00000000..3a2a4818 --- /dev/null +++ b/testdata/hello.proto @@ -0,0 +1,20 @@ +// based on https://grpc.io/docs/guides/concepts.html + +syntax = "proto2"; + +package hello; + +service HelloService { + rpc SayHello(HelloRequest) returns (HelloResponse); + rpc LotsOfReplies(HelloRequest) returns (stream HelloResponse); + rpc LotsOfGreetings(stream HelloRequest) returns (HelloResponse); + rpc BidiHello(stream HelloRequest) returns (stream HelloResponse); +} + +message HelloRequest { + optional string greeting = 1; +} + +message HelloResponse { + required string reply = 1; +} \ No newline at end of file diff --git a/www/docs/options.md b/www/docs/options.md index 3a847a43..27f1d25a 100644 --- a/www/docs/options.md +++ b/www/docs/options.md @@ -331,6 +331,7 @@ Output type. If none provided, a summary is printed. - `"html"` - outputs the metrics report as HTML. - `"influx-summary"` - outputs the metrics summary as InfluxDB line protocol. - `"influx-details"` - outputs the metrics details as InfluxDB line protocol. +- `"prometheus"` - outputs the metrics summary in Prometheus exposition format. See [output formats page](output.md) for details. diff --git a/www/docs/output.md b/www/docs/output.md index 80aaf38f..ac7c1f5f 100644 --- a/www/docs/output.md +++ b/www/docs/output.md @@ -87,6 +87,11 @@ HTML output can be generated using `html` as format in the `-O` option. [Sample Using `-O json` outputs JSON data, and `-O pretty` outputs JSON in pretty format. [Sample pretty JSON output](/pretty.json). +### Prometheus + +Using `-O prometheus` outputs the summary data as [Prometheus text exposition format +](https://prometheus.io/docs/instrumenting/exposition_formats/). [Sample Prometheus output](/prometheus.txt). + ### InfluxDB Line Protocol Using `-O influx-summary` outputs the summary data as [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v1.6/concepts/glossary/#line-protocol). 
Sample output: diff --git a/www/website/static/prometheus.txt b/www/website/static/prometheus.txt new file mode 100644 index 00000000..382344fe --- /dev/null +++ b/www/website/static/prometheus.txt @@ -0,0 +1,39 @@ +# TYPE ghz_run_count gauge +ghz_run_count{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 200 +# TYPE ghz_run_total gauge +ghz_run_total{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 1.126828629e+09 +# TYPE ghz_run_average gauge +ghz_run_average{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 2.80369553e+08 +# TYPE ghz_run_fastest gauge +ghz_run_fastest{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 1.16979031e+08 +# TYPE ghz_run_slowest gauge +ghz_run_slowest{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 7.61653632e+08 +# TYPE ghz_run_rps gauge +ghz_run_rps{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 177.48927818552966 +# TYPE ghz_run_histogram histogram +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.116979031"} 1 
+ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.1814464911"} 149 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.24591395120000004"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.3103814113"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.37484887140000006"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.4393163315000001"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.5037837916000001"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.5682512517"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.6327187118"} 0 
+ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.6971861719000001"} 0 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="0.761653632"} 50 +ghz_run_histogram_bucket{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",le="+Inf"} 200 +ghz_run_histogram_sum{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 1.126828629e+09 +ghz_run_histogram_count{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 200 +# TYPE ghz_run_latency summary +ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.1"} 1.18117467e+08 +ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.25"} 1.19106794e+08 +ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.5"} 1.20755652e+08 
+ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.75"} 1.22163651e+08 +ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.9"} 7.6128053e+08 +ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.95"} 7.61447819e+08 +ghz_run_latency{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051",quantile="0.99"} 7.61648317e+08 +ghz_run_latency_sum{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 1.126828629e+09 +ghz_run_latency_count{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 200 +# TYPE ghz_run_errors gauge +ghz_run_errors{name="",end_reason="normal",skipTLS="false",CPUs="12",call="helloworld.Greeter.SayHello",proto="./testdata/hello.proto",keepalive="0",concurrency="50",timeout="20000000000",rps="0",total="200",import-paths="testdata,.",async="false",count-errors="false",duration="0",insecure="true",skipFirst="0",connections="1",dial-timeout="10000000000",binary="false",host="0.0.0.0:50051"} 0 \ No newline at end of file
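
Usage sketch (not part of the patch): the new "prometheus" format can be exercised either via the CLI flag added in cmd/ghz/main.go (-O prometheus) or programmatically through the printer package. The Go snippet below is a minimal sketch written against the APIs that appear in this diff (printer.ReportPrinter with Report and Out fields, and Print("prometheus")), and it assumes upstream ghz's runner.Run and its functional options (WithProtoFile, WithDataFromJSON, WithInsecure, WithTotalRequests) as documented in the project; the call, proto file, and host are placeholders, and the import paths assume the canonical module path github.com/bojand/ghz.

package main

import (
	"os"

	"github.com/bojand/ghz/printer"
	"github.com/bojand/ghz/runner"
)

func main() {
	// Run a short load test against a local Greeter server
	// (placeholder call/proto/host; adjust for your environment).
	report, err := runner.Run(
		"helloworld.Greeter.SayHello",
		"0.0.0.0:50051",
		runner.WithProtoFile("./testdata/hello.proto", []string{}),
		runner.WithDataFromJSON(`{"name":"Bob"}`),
		runner.WithInsecure(true),
		runner.WithTotalRequests(200),
	)
	if err != nil {
		panic(err)
	}

	// The "prometheus" format added by this patch writes the run summary
	// to Out in the Prometheus text exposition format.
	p := printer.ReportPrinter{Report: report, Out: os.Stdout}
	if err := p.Print("prometheus"); err != nil {
		panic(err)
	}
}

The equivalent CLI invocation, per the updated www/docs/options.md and www/docs/output.md, would be along the lines of: ghz --insecure --proto ./testdata/hello.proto --call helloworld.Greeter.SayHello -O prometheus 0.0.0.0:50051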